Diffstat (limited to 'compiler/rustc_codegen_ssa')
-rw-r--r--  compiler/rustc_codegen_ssa/Cargo.toml  48
-rw-r--r--  compiler/rustc_codegen_ssa/README.md  3
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/archive.rs  69
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/command.rs  178
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs  2800
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs  1788
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/lto.rs  104
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/metadata.rs  314
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/mod.rs  9
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath.rs  114
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath/tests.rs  72
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/symbol_export.rs  590
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs  2015
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs  961
-rw-r--r--  compiler/rustc_codegen_ssa/src/common.rs  223
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs  85
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/map.rs  347
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs  2
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/mod.rs  34
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs  821
-rw-r--r--  compiler/rustc_codegen_ssa/src/glue.rs  123
-rw-r--r--  compiler/rustc_codegen_ssa/src/lib.rs  244
-rw-r--r--  compiler/rustc_codegen_ssa/src/meth.rs  116
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs  368
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs  1654
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/constant.rs  90
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs  55
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs  418
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs  636
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs  410
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs  461
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs  549
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs  729
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs  102
-rw-r--r--  compiler/rustc_codegen_ssa/src/mono_item.rs  147
-rw-r--r--  compiler/rustc_codegen_ssa/src/target_features.rs  308
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/abi.rs  8
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/asm.rs  66
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs  161
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/builder.rs  481
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/consts.rs  41
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs  57
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/debuginfo.rs  79
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/declare.rs  21
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/intrinsic.rs  39
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/misc.rs  26
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/mod.rs  102
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/statics.rs  24
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/type_.rs  151
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/write.rs  68
50 files changed, 18311 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
new file mode 100644
index 000000000..46d6344db
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -0,0 +1,48 @@
+[package]
+name = "rustc_codegen_ssa"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+
+[dependencies]
+bitflags = "1.2.1"
+cc = "1.0.69"
+itertools = "0.10.1"
+tracing = "0.1"
+libc = "0.2.50"
+jobserver = "0.1.22"
+tempfile = "3.2"
+thorin-dwp = "0.3"
+pathdiff = "0.2.0"
+serde_json = "1.0.59"
+snap = "1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+regex = "1.4"
+
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_arena = { path = "../rustc_arena" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_incremental = { path = "../rustc_incremental" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
+rustc_const_eval = { path = "../rustc_const_eval" }
+
+[dependencies.object]
+version = "0.29.0"
+default-features = false
+features = ["read_core", "elf", "macho", "pe", "unaligned", "archive", "write"]
diff --git a/compiler/rustc_codegen_ssa/README.md b/compiler/rustc_codegen_ssa/README.md
new file mode 100644
index 000000000..7b770187b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/README.md
@@ -0,0 +1,3 @@
+Please read the rustc-dev-guide chapter on [Backend Agnostic Codegen][bac].
+
+[bac]: https://rustc-dev-guide.rust-lang.org/backend/backend-agnostic.html
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
new file mode 100644
index 000000000..0d2aa483d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -0,0 +1,69 @@
+use rustc_session::cstore::DllImport;
+use rustc_session::Session;
+
+use std::io;
+use std::path::{Path, PathBuf};
+
+pub(super) fn find_library(
+ name: &str,
+ verbatim: bool,
+ search_paths: &[PathBuf],
+ sess: &Session,
+) -> PathBuf {
+ // On Windows, static libraries sometimes show up as libfoo.a and other
+ // times show up as foo.lib
+ let oslibname = if verbatim {
+ name.to_string()
+ } else {
+ format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix)
+ };
+ let unixlibname = format!("lib{}.a", name);
+
+ for path in search_paths {
+ debug!("looking for {} inside {:?}", name, path);
+ let test = path.join(&oslibname);
+ if test.exists() {
+ return test;
+ }
+ if oslibname != unixlibname {
+ let test = path.join(&unixlibname);
+ if test.exists() {
+ return test;
+ }
+ }
+ }
+ sess.fatal(&format!(
+ "could not find native static library `{}`, \
+ perhaps an -L flag is missing?",
+ name
+ ));
+}
+
+pub trait ArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a>;
+
+    /// Creates a DLL Import Library <https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-creation#creating-an-import-library>
+    /// and returns the path on disk to that import library.
+    /// This function doesn't take `self` so that it can be called from
+    /// `linker_with_args`, which is specialized on `ArchiveBuilder` but
+    /// doesn't take or create an instance of that type.
+ fn create_dll_import_lib(
+ &self,
+ sess: &Session,
+ lib_name: &str,
+ dll_imports: &[DllImport],
+ tmpdir: &Path,
+ ) -> PathBuf;
+}
+
+pub trait ArchiveBuilder<'a> {
+ fn add_file(&mut self, path: &Path);
+
+ fn add_archive(
+ &mut self,
+ archive: &Path,
+ skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> io::Result<()>;
+
+ fn build(self: Box<Self>, output: &Path) -> bool;
+}
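
A minimal usage sketch of the two traits above, assuming a backend-provided `abb: &dyn ArchiveBuilderBuilder` and a `sess: &Session` are in scope (the variable and file names here are illustrative, not from this diff):

    use std::path::Path;

    // Sketch only: build an archive from an object file plus the contents
    // of an existing static library.
    let mut ab = abb.new_archive_builder(sess);
    ab.add_file(Path::new("crate.o"));
    // The closure returns `true` for member names that should be skipped;
    // here nothing is skipped.
    ab.add_archive(Path::new("libnative.a"), Box::new(|_| false))?;
    // `build` consumes the builder and writes the archive to disk.
    ab.build(Path::new("liboutput.rlib"));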
diff --git a/compiler/rustc_codegen_ssa/src/back/command.rs b/compiler/rustc_codegen_ssa/src/back/command.rs
new file mode 100644
index 000000000..9b0ba3413
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/command.rs
@@ -0,0 +1,178 @@
+//! A thin wrapper around `Command` in the standard library which allows us to
+//! read the arguments that are built up.
+
+use std::ffi::{OsStr, OsString};
+use std::fmt;
+use std::io;
+use std::mem;
+use std::process::{self, Output};
+
+use rustc_target::spec::LldFlavor;
+
+#[derive(Clone)]
+pub struct Command {
+ program: Program,
+ args: Vec<OsString>,
+ env: Vec<(OsString, OsString)>,
+ env_remove: Vec<OsString>,
+}
+
+#[derive(Clone)]
+enum Program {
+ Normal(OsString),
+ CmdBatScript(OsString),
+ Lld(OsString, LldFlavor),
+}
+
+impl Command {
+ pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::Normal(program.as_ref().to_owned()))
+ }
+
+ pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
+ }
+
+ pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
+ Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
+ }
+
+ fn _new(program: Program) -> Command {
+ Command { program, args: Vec::new(), env: Vec::new(), env_remove: Vec::new() }
+ }
+
+ pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
+ self._arg(arg.as_ref());
+ self
+ }
+
+ pub fn args<I>(&mut self, args: I) -> &mut Command
+ where
+ I: IntoIterator<Item: AsRef<OsStr>>,
+ {
+ for arg in args {
+ self._arg(arg.as_ref());
+ }
+ self
+ }
+
+ fn _arg(&mut self, arg: &OsStr) {
+ self.args.push(arg.to_owned());
+ }
+
+ pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ self._env(key.as_ref(), value.as_ref());
+ self
+ }
+
+ fn _env(&mut self, key: &OsStr, value: &OsStr) {
+ self.env.push((key.to_owned(), value.to_owned()));
+ }
+
+ pub fn env_remove<K>(&mut self, key: K) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ {
+ self._env_remove(key.as_ref());
+ self
+ }
+
+ fn _env_remove(&mut self, key: &OsStr) {
+ self.env_remove.push(key.to_owned());
+ }
+
+ pub fn output(&mut self) -> io::Result<Output> {
+ self.command().output()
+ }
+
+ pub fn command(&self) -> process::Command {
+ let mut ret = match self.program {
+ Program::Normal(ref p) => process::Command::new(p),
+ Program::CmdBatScript(ref p) => {
+ let mut c = process::Command::new("cmd");
+ c.arg("/c").arg(p);
+ c
+ }
+ Program::Lld(ref p, flavor) => {
+ let mut c = process::Command::new(p);
+ c.arg("-flavor").arg(flavor.as_str());
+ if let LldFlavor::Wasm = flavor {
+ // LLVM expects host-specific formatting for @file
+ // arguments, but we always generate posix formatted files
+ // at this time. Indicate as such.
+ c.arg("--rsp-quoting=posix");
+ }
+ c
+ }
+ };
+ ret.args(&self.args);
+ ret.envs(self.env.clone());
+ for k in &self.env_remove {
+ ret.env_remove(k);
+ }
+ ret
+ }
+
+ // extensions
+
+ pub fn get_args(&self) -> &[OsString] {
+ &self.args
+ }
+
+ pub fn take_args(&mut self) -> Vec<OsString> {
+ mem::take(&mut self.args)
+ }
+
+    /// Returns `true` if we're pretty sure that this'll blow OS spawn limits,
+    /// or `false` if we should attempt to spawn and see what the OS says.
+ pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
+ // We mostly only care about Windows in this method, on Unix the limits
+ // can be gargantuan anyway so we're pretty unlikely to hit them
+ if cfg!(unix) {
+ return false;
+ }
+
+ // Right now LLD doesn't support the `@` syntax of passing an argument
+ // through files, so regardless of the platform we try to go to the OS
+ // on this one.
+ if let Program::Lld(..) = self.program {
+ return false;
+ }
+
+        // Ok so on Windows the command line of a spawned process is limited to
+        // 32,768 characters [1]. Unfortunately we don't actually have access to that
+ // as it's calculated just before spawning. Instead we perform a
+ // poor-man's guess as to how long our command line will be. We're
+ // assuming here that we don't have to escape every character...
+ //
+ // Turns out though that `cmd.exe` has even smaller limits, 8192
+ // characters [2]. Linkers can often be batch scripts (for example
+ // Emscripten, Gecko's current build system) which means that we're
+ // running through batch scripts. These linkers often just forward
+ // arguments elsewhere (and maybe tack on more), so if we blow 8192
+ // bytes we'll typically cause them to blow as well.
+ //
+ // Basically as a result just perform an inflated estimate of what our
+ // command line will look like and test if it's > 8192 (we actually
+ // test against 6k to artificially inflate our estimate). If all else
+ // fails we'll fall back to the normal unix logic of testing the OS
+ // error code if we fail to spawn and automatically re-spawning the
+ // linker with smaller arguments.
+ //
+ // [1]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessa
+ // [2]: https://devblogs.microsoft.com/oldnewthing/?p=41553
+
+ let estimated_command_line_len = self.args.iter().map(|a| a.len()).sum::<usize>();
+ estimated_command_line_len > 1024 * 6
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.command().fmt(f)
+ }
+}
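
A short sketch of how this wrapper is meant to be used (the program name and files are illustrative): arguments accumulate in the wrapper and can be inspected or drained before anything is spawned:

    let mut cmd = Command::new("cc");
    cmd.arg("-o").arg("out").args(["a.o", "b.o"]);
    if cmd.very_likely_to_exceed_some_spawn_limit() {
        // Drain the accumulated arguments so they can be re-sent via an
        // @-file instead (see `exec_linker` in link.rs below).
        let args = cmd.take_args();
        // ... write `args` to a response file and pass `@file` ...
    } else {
        // Builds and runs the underlying `std::process::Command`.
        let _output = cmd.output();
    }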
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
new file mode 100644
index 000000000..63207803e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -0,0 +1,2800 @@
+use rustc_arena::TypedArena;
+use rustc_ast::CRATE_NODE_ID;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_errors::{ErrorGuaranteed, Handler};
+use rustc_fs_util::fix_windows_verbatim_for_gcc;
+use rustc_hir::def_id::CrateNum;
+use rustc_metadata::fs::{emit_metadata, METADATA_FILENAME};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Strip};
+use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
+use rustc_session::cstore::DllImport;
+use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
+use rustc_session::search_paths::PathKind;
+use rustc_session::utils::NativeLibKind;
+/// For all the linkers we support, and information they might
+/// need out of the shared crate context before we get rid of it.
+use rustc_session::{filesearch, Session};
+use rustc_span::symbol::Symbol;
+use rustc_span::DebuggerVisualizerFile;
+use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor, SplitDebuginfo};
+use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, SanitizerSet, Target};
+
+use super::archive::{find_library, ArchiveBuilder, ArchiveBuilderBuilder};
+use super::command::Command;
+use super::linker::{self, Linker};
+use super::metadata::{create_rmeta_file, MetadataPosition};
+use super::rpath::{self, RPathConfig};
+use crate::{looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, NativeLib};
+
+use cc::windows_registry;
+use regex::Regex;
+use tempfile::Builder as TempFileBuilder;
+
+use std::borrow::Borrow;
+use std::cell::OnceCell;
+use std::collections::BTreeSet;
+use std::ffi::OsString;
+use std::fs::{File, OpenOptions};
+use std::io::{BufWriter, Write};
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
+use std::process::{ExitStatus, Output, Stdio};
+use std::{ascii, char, env, fmt, fs, io, mem, str};
+
+pub fn ensure_removed(diag_handler: &Handler, path: &Path) {
+ if let Err(e) = fs::remove_file(path) {
+ if e.kind() != io::ErrorKind::NotFound {
+ diag_handler.err(&format!("failed to remove {}: {}", path.display(), e));
+ }
+ }
+}
+
+/// Performs the linkage portion of the compilation phase. This will generate all
+/// of the requested outputs for this compilation session.
+pub fn link_binary<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ outputs: &OutputFilenames,
+) -> Result<(), ErrorGuaranteed> {
+ let _timer = sess.timer("link_binary");
+ let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
+ for &crate_type in sess.crate_types().iter() {
+ // Ignore executable crates if we have -Z no-codegen, as they will error.
+ if (sess.opts.unstable_opts.no_codegen || !sess.opts.output_types.should_codegen())
+ && !output_metadata
+ && crate_type == CrateType::Executable
+ {
+ continue;
+ }
+
+ if invalid_output_for_target(sess, crate_type) {
+ bug!(
+ "invalid output type `{:?}` for target os `{}`",
+ crate_type,
+ sess.opts.target_triple
+ );
+ }
+
+ sess.time("link_binary_check_files_are_writeable", || {
+ for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ check_file_is_writeable(obj, sess);
+ }
+ });
+
+ if outputs.outputs.should_link() {
+ let tmpdir = TempFileBuilder::new()
+ .prefix("rustc")
+ .tempdir()
+ .unwrap_or_else(|err| sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+ let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
+ let out_filename = out_filename(
+ sess,
+ crate_type,
+ outputs,
+ codegen_results.crate_info.local_crate_name.as_str(),
+ );
+ match crate_type {
+ CrateType::Rlib => {
+ let _timer = sess.timer("link_rlib");
+ info!("preparing rlib to {:?}", out_filename);
+ link_rlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ RlibFlavor::Normal,
+ &path,
+ )?
+ .build(&out_filename);
+ }
+ CrateType::Staticlib => {
+ link_staticlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ &out_filename,
+ &path,
+ )?;
+ }
+ _ => {
+ link_natively(
+ sess,
+ archive_builder_builder,
+ crate_type,
+ &out_filename,
+ codegen_results,
+ path.as_ref(),
+ )?;
+ }
+ }
+ if sess.opts.json_artifact_notifications {
+ sess.parse_sess.span_diagnostic.emit_artifact_notification(&out_filename, "link");
+ }
+
+ if sess.prof.enabled() {
+ if let Some(artifact_name) = out_filename.file_name() {
+ // Record size for self-profiling
+ let file_size = std::fs::metadata(&out_filename).map(|m| m.len()).unwrap_or(0);
+
+ sess.prof.artifact_size(
+ "linked_artifact",
+ artifact_name.to_string_lossy(),
+ file_size,
+ );
+ }
+ }
+ }
+ }
+
+ // Remove the temporary object file and metadata if we aren't saving temps.
+ sess.time("link_binary_remove_temps", || {
+ // If the user requests that temporaries are saved, don't delete any.
+ if sess.opts.cg.save_temps {
+ return;
+ }
+
+ let maybe_remove_temps_from_module =
+ |preserve_objects: bool, preserve_dwarf_objects: bool, module: &CompiledModule| {
+ if !preserve_objects {
+ if let Some(ref obj) = module.object {
+ ensure_removed(sess.diagnostic(), obj);
+ }
+ }
+
+ if !preserve_dwarf_objects {
+ if let Some(ref dwo_obj) = module.dwarf_object {
+ ensure_removed(sess.diagnostic(), dwo_obj);
+ }
+ }
+ };
+
+ let remove_temps_from_module =
+ |module: &CompiledModule| maybe_remove_temps_from_module(false, false, module);
+
+ // Otherwise, always remove the metadata and allocator module temporaries.
+ if let Some(ref metadata_module) = codegen_results.metadata_module {
+ remove_temps_from_module(metadata_module);
+ }
+
+ if let Some(ref allocator_module) = codegen_results.allocator_module {
+ remove_temps_from_module(allocator_module);
+ }
+
+ // If no requested outputs require linking, then the object temporaries should
+ // be kept.
+ if !sess.opts.output_types.should_link() {
+ return;
+ }
+
+ // Potentially keep objects for their debuginfo.
+ let (preserve_objects, preserve_dwarf_objects) = preserve_objects_for_their_debuginfo(sess);
+ debug!(?preserve_objects, ?preserve_dwarf_objects);
+
+ for module in &codegen_results.modules {
+ maybe_remove_temps_from_module(preserve_objects, preserve_dwarf_objects, module);
+ }
+ });
+
+ Ok(())
+}
+
+pub fn each_linked_rlib(
+ info: &CrateInfo,
+ f: &mut dyn FnMut(CrateNum, &Path),
+) -> Result<(), String> {
+ let crates = info.used_crates.iter();
+ let mut fmts = None;
+ for (ty, list) in info.dependency_formats.iter() {
+ match ty {
+ CrateType::Executable
+ | CrateType::Staticlib
+ | CrateType::Cdylib
+ | CrateType::ProcMacro => {
+ fmts = Some(list);
+ break;
+ }
+ _ => {}
+ }
+ }
+ let Some(fmts) = fmts else {
+ return Err("could not find formats for rlibs".to_string());
+ };
+ for &cnum in crates {
+ match fmts.get(cnum.as_usize() - 1) {
+ Some(&Linkage::NotLinked | &Linkage::IncludedFromDylib) => continue,
+ Some(_) => {}
+ None => return Err("could not find formats for rlibs".to_string()),
+ }
+ let name = info.crate_name[&cnum];
+ let used_crate_source = &info.used_crate_source[&cnum];
+ if let Some((path, _)) = &used_crate_source.rlib {
+ f(cnum, &path);
+ } else {
+ if used_crate_source.rmeta.is_some() {
+ return Err(format!(
+ "could not find rlib for: `{}`, found rmeta (metadata) file",
+ name
+ ));
+ } else {
+ return Err(format!("could not find rlib for: `{}`", name));
+ }
+ }
+ }
+ Ok(())
+}
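
As an illustration of the callback shape (mirroring how `link_staticlib` below calls this function; `codegen_results` and `sess` are assumed to be in scope):

    // Collect the path of every rlib that will end up in the link.
    let mut rlib_paths = Vec::new();
    each_linked_rlib(&codegen_results.crate_info, &mut |cnum, path| {
        rlib_paths.push((cnum, path.to_path_buf()));
    })
    .unwrap_or_else(|e| sess.fatal(&e));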
+
+/// Create an 'rlib'.
+///
+/// An rlib in its current incarnation is essentially a renamed .a file. The rlib primarily contains
+/// the object file of the crate, but it also contains all of the object files from native
+/// libraries. This is done by unzipping native libraries and inserting all of the contents into
+/// this archive.
+fn link_rlib<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ flavor: RlibFlavor,
+ tmpdir: &MaybeTempDir,
+) -> Result<Box<dyn ArchiveBuilder<'a> + 'a>, ErrorGuaranteed> {
+ let lib_search_paths = archive_search_paths(sess);
+
+ let mut ab = archive_builder_builder.new_archive_builder(sess);
+
+ let trailing_metadata = match flavor {
+ RlibFlavor::Normal => {
+ let (metadata, metadata_position) =
+ create_rmeta_file(sess, codegen_results.metadata.raw_data());
+ let metadata = emit_metadata(sess, &metadata, tmpdir);
+ match metadata_position {
+ MetadataPosition::First => {
+ // Most of the time metadata in rlib files is wrapped in a "dummy" object
+ // file for the target platform so the rlib can be processed entirely by
+                    // normal linkers for the platform, though sometimes this is not possible.
+                    // When it is possible, placing the metadata object first improves the
+                    // performance of getting metadata from rlibs.
+ ab.add_file(&metadata);
+ None
+ }
+ MetadataPosition::Last => Some(metadata),
+ }
+ }
+
+ RlibFlavor::StaticlibBase => None,
+ };
+
+ for m in &codegen_results.modules {
+ if let Some(obj) = m.object.as_ref() {
+ ab.add_file(obj);
+ }
+
+ if let Some(dwarf_obj) = m.dwarf_object.as_ref() {
+ ab.add_file(dwarf_obj);
+ }
+ }
+
+ match flavor {
+ RlibFlavor::Normal => {}
+ RlibFlavor::StaticlibBase => {
+ let obj = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref());
+ if let Some(obj) = obj {
+ ab.add_file(obj);
+ }
+ }
+ }
+
+ // Note that in this loop we are ignoring the value of `lib.cfg`. That is,
+ // we may not be configured to actually include a static library if we're
+ // adding it here. That's because later when we consume this rlib we'll
+ // decide whether we actually needed the static library or not.
+ //
+ // To do this "correctly" we'd need to keep track of which libraries added
+ // which object files to the archive. We don't do that here, however. The
+ // #[link(cfg(..))] feature is unstable, though, and only intended to get
+ // liblibc working. In that sense the check below just indicates that if
+ // there are any libraries we want to omit object files for at link time we
+ // just exclude all custom object files.
+ //
+ // Eventually if we want to stabilize or flesh out the #[link(cfg(..))]
+ // feature then we'll need to figure out how to record what objects were
+ // loaded from the libraries found here and then encode that into the
+ // metadata of the rlib we're generating somehow.
+ for lib in codegen_results.crate_info.used_libraries.iter() {
+ match lib.kind {
+ NativeLibKind::Static { bundle: None | Some(true), whole_archive: Some(true) }
+ if flavor == RlibFlavor::Normal =>
+ {
+ // Don't allow mixing +bundle with +whole_archive since an rlib may contain
+ // multiple native libs, some of which are +whole-archive and some of which are
+ // -whole-archive and it isn't clear how we can currently handle such a
+ // situation correctly.
+ // See https://github.com/rust-lang/rust/issues/88085#issuecomment-901050897
+ sess.err(
+ "the linking modifiers `+bundle` and `+whole-archive` are not compatible \
+ with each other when generating rlibs",
+ );
+ }
+ NativeLibKind::Static { bundle: None | Some(true), .. } => {}
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
+ | NativeLibKind::Framework { .. }
+ | NativeLibKind::RawDylib
+ | NativeLibKind::LinkArg
+ | NativeLibKind::Unspecified => continue,
+ }
+ if let Some(name) = lib.name {
+ let location =
+ find_library(name.as_str(), lib.verbatim.unwrap_or(false), &lib_search_paths, sess);
+ ab.add_archive(&location, Box::new(|_| false)).unwrap_or_else(|e| {
+ sess.fatal(&format!(
+ "failed to add native library {}: {}",
+ location.to_string_lossy(),
+ e
+ ));
+ });
+ }
+ }
+
+ for (raw_dylib_name, raw_dylib_imports) in
+ collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ {
+ let output_path = archive_builder_builder.create_dll_import_lib(
+ sess,
+ &raw_dylib_name,
+ &raw_dylib_imports,
+ tmpdir.as_ref(),
+ );
+
+ ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|e| {
+ sess.fatal(&format!("failed to add native library {}: {}", output_path.display(), e));
+ });
+ }
+
+ if let Some(trailing_metadata) = trailing_metadata {
+ // Note that it is important that we add all of our non-object "magical
+ // files" *after* all of the object files in the archive. The reason for
+ // this is as follows:
+ //
+ // * When performing LTO, this archive will be modified to remove
+ // objects from above. The reason for this is described below.
+ //
+ // * When the system linker looks at an archive, it will attempt to
+    //   determine the architecture of the archive in order to see whether it's
+ // linkable.
+ //
+ // The algorithm for this detection is: iterate over the files in the
+ // archive. Skip magical SYMDEF names. Interpret the first file as an
+ // object file. Read architecture from the object file.
+ //
+ // * As one can probably see, if "metadata" and "foo.bc" were placed
+ // before all of the objects, then the architecture of this archive would
+ // not be correctly inferred once 'foo.o' is removed.
+ //
+ // * Most of the time metadata in rlib files is wrapped in a "dummy" object
+ // file for the target platform so the rlib can be processed entirely by
+ // normal linkers for the platform. Sometimes this is not possible however.
+ //
+ // Basically, all this means is that this code should not move above the
+ // code above.
+ ab.add_file(&trailing_metadata);
+ }
+
+ return Ok(ab);
+}
+
+/// Extract all symbols defined in raw-dylib libraries, collated by library name.
+///
+/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library,
+/// then the CodegenResults value contains one NativeLib instance for each block. However, the
+/// linker appears to expect only a single import library for each library used, so we need to
+/// collate the symbols together by library name before generating the import libraries.
+fn collate_raw_dylibs(
+ sess: &Session,
+ used_libraries: &[NativeLib],
+) -> Result<Vec<(String, Vec<DllImport>)>, ErrorGuaranteed> {
+ // Use index maps to preserve original order of imports and libraries.
+ let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default();
+
+ for lib in used_libraries {
+ if lib.kind == NativeLibKind::RawDylib {
+ let ext = if matches!(lib.verbatim, Some(true)) { "" } else { ".dll" };
+ let name = format!("{}{}", lib.name.expect("unnamed raw-dylib library"), ext);
+ let imports = dylib_table.entry(name.clone()).or_default();
+ for import in &lib.dll_imports {
+ if let Some(old_import) = imports.insert(import.name, import) {
+ // FIXME: when we add support for ordinals, figure out if we need to do anything
+ // if we have two DllImport values with the same name but different ordinals.
+ if import.calling_convention != old_import.calling_convention {
+ sess.span_err(
+ import.span,
+ &format!(
+ "multiple declarations of external function `{}` from \
+ library `{}` have different calling conventions",
+ import.name, name,
+ ),
+ );
+ }
+ }
+ }
+ }
+ }
+ sess.compile_status()?;
+ Ok(dylib_table
+ .into_iter()
+ .map(|(name, imports)| {
+ (name, imports.into_iter().map(|(_, import)| import.clone()).collect())
+ })
+ .collect())
+}
+
+/// Create a static archive.
+///
+/// This is essentially the same thing as an rlib, but it also involves adding all of the upstream
+/// crates' objects into the archive. This will slurp in all of the native libraries of upstream
+/// dependencies as well.
+///
+/// Additionally, there's no way for us to link dynamic libraries, so we warn that any dynamic
+/// library dependencies are not linked in.
+///
+/// There's no need to include metadata in a static archive, so take care not to link in the
+/// metadata object file (and also don't prepare the archive with a metadata file).
+fn link_staticlib<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+ tempdir: &MaybeTempDir,
+) -> Result<(), ErrorGuaranteed> {
+ info!("preparing staticlib to {:?}", out_filename);
+ let mut ab = link_rlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ RlibFlavor::StaticlibBase,
+ tempdir,
+ )?;
+ let mut all_native_libs = vec![];
+
+ let res = each_linked_rlib(&codegen_results.crate_info, &mut |cnum, path| {
+ let name = codegen_results.crate_info.crate_name[&cnum];
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+
+ // Here when we include the rlib into our staticlib we need to make a
+ // decision whether to include the extra object files along the way.
+ // These extra object files come from statically included native
+ // libraries, but they may be cfg'd away with #[link(cfg(..))].
+ //
+ // This unstable feature, though, only needs liblibc to work. The only
+ // use case there is where musl is statically included in liblibc.rlib,
+ // so if we don't want the included version we just need to skip it. As
+ // a result the logic here is that if *any* linked library is cfg'd away
+ // we just skip all object files.
+ //
+ // Clearly this is not sufficient for a general purpose feature, and
+ // we'd want to read from the library's metadata to determine which
+ // object files come from where and selectively skip them.
+ let skip_object_files = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
+
+ let lto = are_upstream_rust_objects_already_included(sess)
+ && !ignored_for_lto(sess, &codegen_results.crate_info, cnum);
+
+    // Identify the crate's obj files by the crate name as a prefix rather
+    // than by an exact comparison, since there might also be an extra name
+    // suffix.
+ let obj_start = name.as_str().to_owned();
+
+ ab.add_archive(
+ path,
+ Box::new(move |fname: &str| {
+ // Ignore metadata files, no matter the name.
+ if fname == METADATA_FILENAME {
+ return true;
+ }
+
+ // Don't include Rust objects if LTO is enabled
+ if lto && looks_like_rust_object_file(fname) {
+ return true;
+ }
+
+ // Otherwise if this is *not* a rust object and we're skipping
+ // objects then skip this file
+ if skip_object_files && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+ return true;
+ }
+
+ // ok, don't skip this
+ false
+ }),
+ )
+ .unwrap();
+
+ all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
+ });
+ if let Err(e) = res {
+ sess.fatal(&e);
+ }
+
+ ab.build(out_filename);
+
+ if !all_native_libs.is_empty() {
+ if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
+ print_native_static_libs(sess, &all_native_libs);
+ }
+ }
+
+ Ok(())
+}
+
+fn escape_stdout_stderr_string(s: &[u8]) -> String {
+ str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+ let mut x = "Non-UTF-8 output: ".to_string();
+ x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+ x
+ })
+}
+
+/// Use `thorin` (a Rust implementation of a DWARF packaging utility) to link DWARF objects into a
+/// DWARF package.
+fn link_dwarf_object<'a>(
+ sess: &'a Session,
+ cg_results: &CodegenResults,
+ executable_out_filename: &Path,
+) {
+ let dwp_out_filename = executable_out_filename.with_extension("dwp");
+ debug!(?dwp_out_filename, ?executable_out_filename);
+
+ #[derive(Default)]
+ struct ThorinSession<Relocations> {
+ arena_data: TypedArena<Vec<u8>>,
+ arena_mmap: TypedArena<Mmap>,
+ arena_relocations: TypedArena<Relocations>,
+ }
+
+ impl<Relocations> ThorinSession<Relocations> {
+ fn alloc_mmap<'arena>(&'arena self, data: Mmap) -> &'arena Mmap {
+ (*self.arena_mmap.alloc(data)).borrow()
+ }
+ }
+
+ impl<Relocations> thorin::Session<Relocations> for ThorinSession<Relocations> {
+ fn alloc_data<'arena>(&'arena self, data: Vec<u8>) -> &'arena [u8] {
+ (*self.arena_data.alloc(data)).borrow()
+ }
+
+ fn alloc_relocation<'arena>(&'arena self, data: Relocations) -> &'arena Relocations {
+ (*self.arena_relocations.alloc(data)).borrow()
+ }
+
+ fn read_input<'arena>(&'arena self, path: &Path) -> std::io::Result<&'arena [u8]> {
+ let file = File::open(&path)?;
+ let mmap = (unsafe { Mmap::map(file) })?;
+ Ok(self.alloc_mmap(mmap))
+ }
+ }
+
+ match sess.time("run_thorin", || -> Result<(), thorin::Error> {
+ let thorin_sess = ThorinSession::default();
+ let mut package = thorin::DwarfPackage::new(&thorin_sess);
+
+ // Input objs contain .o/.dwo files from the current crate.
+ match sess.opts.unstable_opts.split_dwarf_kind {
+ SplitDwarfKind::Single => {
+ for input_obj in cg_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ package.add_input_object(input_obj)?;
+ }
+ }
+ SplitDwarfKind::Split => {
+ for input_obj in cg_results.modules.iter().filter_map(|m| m.dwarf_object.as_ref()) {
+ package.add_input_object(input_obj)?;
+ }
+ }
+ }
+
+ // Input rlibs contain .o/.dwo files from dependencies.
+ let input_rlibs = cg_results
+ .crate_info
+ .used_crate_source
+ .values()
+ .filter_map(|csource| csource.rlib.as_ref())
+ .map(|(path, _)| path);
+ for input_rlib in input_rlibs {
+ debug!(?input_rlib);
+ package.add_input_object(input_rlib)?;
+ }
+
+ // Failing to read the referenced objects is expected for dependencies where the path in the
+ // executable will have been cleaned by Cargo, but the referenced objects will be contained
+ // within rlibs provided as inputs.
+ //
+ // If paths have been remapped, then .o/.dwo files from the current crate also won't be
+ // found, but are provided explicitly above.
+ //
+ // Adding an executable is primarily done to make `thorin` check that all the referenced
+ // dwarf objects are found in the end.
+ package.add_executable(
+ &executable_out_filename,
+ thorin::MissingReferencedObjectBehaviour::Skip,
+ )?;
+
+ let output = package.finish()?.write()?;
+ let mut output_stream = BufWriter::new(
+ OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(dwp_out_filename)?,
+ );
+ output_stream.write_all(&output)?;
+ output_stream.flush()?;
+
+ Ok(())
+ }) {
+ Ok(()) => {}
+ Err(e) => {
+ sess.struct_err("linking dwarf objects with thorin failed")
+ .note(&format!("{:?}", e))
+ .emit();
+ sess.abort_if_errors();
+ }
+ }
+}
+
+/// Create a dynamic library or executable.
+///
+/// This will invoke the system linker/cc to create the resulting file. This links to all upstream
+/// files as well.
+fn link_natively<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ crate_type: CrateType,
+ out_filename: &Path,
+ codegen_results: &CodegenResults,
+ tmpdir: &Path,
+) -> Result<(), ErrorGuaranteed> {
+ info!("preparing {:?} to {:?}", crate_type, out_filename);
+ let (linker_path, flavor) = linker_and_flavor(sess);
+ let mut cmd = linker_with_args(
+ &linker_path,
+ flavor,
+ sess,
+ archive_builder_builder,
+ crate_type,
+ tmpdir,
+ out_filename,
+ codegen_results,
+ )?;
+
+ linker::disable_localization(&mut cmd);
+
+ for &(ref k, ref v) in sess.target.link_env.as_ref() {
+ cmd.env(k.as_ref(), v.as_ref());
+ }
+ for k in sess.target.link_env_remove.as_ref() {
+ cmd.env_remove(k.as_ref());
+ }
+
+ if sess.opts.prints.contains(&PrintRequest::LinkArgs) {
+ println!("{:?}", &cmd);
+ }
+
+ // May have not found libraries in the right formats.
+ sess.abort_if_errors();
+
+ // Invoke the system linker
+ info!("{:?}", &cmd);
+ let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
+ let unknown_arg_regex =
+ Regex::new(r"(unknown|unrecognized) (command line )?(option|argument)").unwrap();
+ let mut prog;
+ let mut i = 0;
+ loop {
+ i += 1;
+ prog = sess.time("run_linker", || exec_linker(sess, &cmd, out_filename, tmpdir));
+ let Ok(ref output) = prog else {
+ break;
+ };
+ if output.status.success() {
+ break;
+ }
+ let mut out = output.stderr.clone();
+ out.extend(&output.stdout);
+ let out = String::from_utf8_lossy(&out);
+
+ // Check to see if the link failed with an error message that indicates it
+ // doesn't recognize the -no-pie option. If so, re-perform the link step
+ // without it. This is safe because if the linker doesn't support -no-pie
+ // then it should not default to linking executables as pie. Different
+ // versions of gcc seem to use different quotes in the error message so
+ // don't check for them.
+ if sess.target.linker_is_gnu
+ && flavor != LinkerFlavor::Ld
+ && unknown_arg_regex.is_match(&out)
+ && out.contains("-no-pie")
+ && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie")
+ {
+ info!("linker output: {:?}", out);
+ warn!("Linker does not support -no-pie command line option. Retrying without.");
+ for arg in cmd.take_args() {
+ if arg.to_string_lossy() != "-no-pie" {
+ cmd.arg(arg);
+ }
+ }
+ info!("{:?}", &cmd);
+ continue;
+ }
+
+ // Detect '-static-pie' used with an older version of gcc or clang not supporting it.
+ // Fallback from '-static-pie' to '-static' in that case.
+ if sess.target.linker_is_gnu
+ && flavor != LinkerFlavor::Ld
+ && unknown_arg_regex.is_match(&out)
+ && (out.contains("-static-pie") || out.contains("--no-dynamic-linker"))
+ && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-static-pie")
+ {
+ info!("linker output: {:?}", out);
+ warn!(
+ "Linker does not support -static-pie command line option. Retrying with -static instead."
+ );
+ // Mirror `add_(pre,post)_link_objects` to replace CRT objects.
+ let self_contained = crt_objects_fallback(sess, crate_type);
+ let opts = &sess.target;
+ let pre_objects = if self_contained {
+ &opts.pre_link_objects_fallback
+ } else {
+ &opts.pre_link_objects
+ };
+ let post_objects = if self_contained {
+ &opts.post_link_objects_fallback
+ } else {
+ &opts.post_link_objects
+ };
+ let get_objects = |objects: &CrtObjects, kind| {
+ objects
+ .get(&kind)
+ .iter()
+ .copied()
+ .flatten()
+ .map(|obj| get_object_file_path(sess, obj, self_contained).into_os_string())
+ .collect::<Vec<_>>()
+ };
+ let pre_objects_static_pie = get_objects(pre_objects, LinkOutputKind::StaticPicExe);
+ let post_objects_static_pie = get_objects(post_objects, LinkOutputKind::StaticPicExe);
+ let mut pre_objects_static = get_objects(pre_objects, LinkOutputKind::StaticNoPicExe);
+ let mut post_objects_static = get_objects(post_objects, LinkOutputKind::StaticNoPicExe);
+ // Assume that we know insertion positions for the replacement arguments from replaced
+ // arguments, which is true for all supported targets.
+ assert!(pre_objects_static.is_empty() || !pre_objects_static_pie.is_empty());
+ assert!(post_objects_static.is_empty() || !post_objects_static_pie.is_empty());
+ for arg in cmd.take_args() {
+ if arg.to_string_lossy() == "-static-pie" {
+ // Replace the output kind.
+ cmd.arg("-static");
+ } else if pre_objects_static_pie.contains(&arg) {
+ // Replace the pre-link objects (replace the first and remove the rest).
+ cmd.args(mem::take(&mut pre_objects_static));
+ } else if post_objects_static_pie.contains(&arg) {
+ // Replace the post-link objects (replace the first and remove the rest).
+ cmd.args(mem::take(&mut post_objects_static));
+ } else {
+ cmd.arg(arg);
+ }
+ }
+ info!("{:?}", &cmd);
+ continue;
+ }
+
+ // Here's a terribly awful hack that really shouldn't be present in any
+ // compiler. Here an environment variable is supported to automatically
+ // retry the linker invocation if the linker looks like it segfaulted.
+ //
+ // Gee that seems odd, normally segfaults are things we want to know
+ // about! Unfortunately though in rust-lang/rust#38878 we're
+ // experiencing the linker segfaulting on Travis quite a bit which is
+ // causing quite a bit of pain to land PRs when they spuriously fail
+ // due to a segfault.
+ //
+ // The issue #38878 has some more debugging information on it as well,
+ // but this unfortunately looks like it's just a race condition in
+ // macOS's linker with some thread pool working in the background. It
+ // seems that no one currently knows a fix for this so in the meantime
+ // we're left with this...
+ if !retry_on_segfault || i > 3 {
+ break;
+ }
+ let msg_segv = "clang: error: unable to execute command: Segmentation fault: 11";
+ let msg_bus = "clang: error: unable to execute command: Bus error: 10";
+ if out.contains(msg_segv) || out.contains(msg_bus) {
+ warn!(
+ ?cmd, %out,
+ "looks like the linker segfaulted when we tried to call it, \
+ automatically retrying again",
+ );
+ continue;
+ }
+
+ if is_illegal_instruction(&output.status) {
+ warn!(
+ ?cmd, %out, status = %output.status,
+ "looks like the linker hit an illegal instruction when we \
+ tried to call it, automatically retrying again.",
+ );
+ continue;
+ }
+
+ #[cfg(unix)]
+ fn is_illegal_instruction(status: &ExitStatus) -> bool {
+ use std::os::unix::prelude::*;
+ status.signal() == Some(libc::SIGILL)
+ }
+
+ #[cfg(not(unix))]
+ fn is_illegal_instruction(_status: &ExitStatus) -> bool {
+ false
+ }
+ }
+
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ let escaped_output = escape_stdout_stderr_string(&output);
+ let mut err = sess.struct_err(&format!(
+ "linking with `{}` failed: {}",
+ linker_path.display(),
+ prog.status
+ ));
+ err.note(&format!("{:?}", &cmd)).note(&escaped_output);
+ if escaped_output.contains("undefined reference to") {
+ err.help(
+ "some `extern` functions couldn't be found; some native libraries may \
+ need to be installed or have their path specified",
+ );
+ err.note("use the `-l` flag to specify native libraries to link");
+ err.note("use the `cargo:rustc-link-lib` directive to specify the native \
+ libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#cargorustc-link-libkindname)");
+ }
+ err.emit();
+
+ // If MSVC's `link.exe` was expected but the return code
+ // is not a Microsoft LNK error then suggest a way to fix or
+ // install the Visual Studio build tools.
+ if let Some(code) = prog.status.code() {
+ if sess.target.is_like_msvc
+ && flavor == LinkerFlavor::Msvc
+ // Respect the command line override
+ && sess.opts.cg.linker.is_none()
+ // Match exactly "link.exe"
+ && linker_path.to_str() == Some("link.exe")
+ // All Microsoft `link.exe` linking error codes are
+ // four digit numbers in the range 1000 to 9999 inclusive
+ && (code < 1000 || code > 9999)
+ {
+ let is_vs_installed = windows_registry::find_vs_version().is_ok();
+ let has_linker = windows_registry::find_tool(
+ &sess.opts.target_triple.triple(),
+ "link.exe",
+ )
+ .is_some();
+
+ sess.note_without_error("`link.exe` returned an unexpected error");
+ if is_vs_installed && has_linker {
+ // the linker is broken
+ sess.note_without_error(
+ "the Visual Studio build tools may need to be repaired \
+ using the Visual Studio installer",
+ );
+ sess.note_without_error(
+ "or a necessary component may be missing from the \
+ \"C++ build tools\" workload",
+ );
+ } else if is_vs_installed {
+ // the linker is not installed
+ sess.note_without_error(
+ "in the Visual Studio installer, ensure the \
+ \"C++ build tools\" workload is selected",
+ );
+ } else {
+ // visual studio is not installed
+ sess.note_without_error(
+ "you may need to install Visual Studio build tools with the \
+ \"C++ build tools\" workload",
+ );
+ }
+ }
+ }
+
+ sess.abort_if_errors();
+ }
+ info!("linker stderr:\n{}", escape_stdout_stderr_string(&prog.stderr));
+ info!("linker stdout:\n{}", escape_stdout_stderr_string(&prog.stdout));
+ }
+ Err(e) => {
+ let linker_not_found = e.kind() == io::ErrorKind::NotFound;
+
+ let mut linker_error = {
+ if linker_not_found {
+ sess.struct_err(&format!("linker `{}` not found", linker_path.display()))
+ } else {
+ sess.struct_err(&format!(
+ "could not exec the linker `{}`",
+ linker_path.display()
+ ))
+ }
+ };
+
+ linker_error.note(&e.to_string());
+
+ if !linker_not_found {
+ linker_error.note(&format!("{:?}", &cmd));
+ }
+
+ linker_error.emit();
+
+ if sess.target.is_like_msvc && linker_not_found {
+ sess.note_without_error(
+ "the msvc targets depend on the msvc linker \
+ but `link.exe` was not found",
+ );
+ sess.note_without_error(
+ "please ensure that VS 2013, VS 2015, VS 2017, VS 2019 or VS 2022 \
+ was installed with the Visual C++ option",
+ );
+ }
+ sess.abort_if_errors();
+ }
+ }
+
+ match sess.split_debuginfo() {
+ // If split debug information is disabled or located in individual files
+ // there's nothing to do here.
+ SplitDebuginfo::Off | SplitDebuginfo::Unpacked => {}
+
+ // If packed split-debuginfo is requested, but the final compilation
+ // doesn't actually have any debug information, then we skip this step.
+ SplitDebuginfo::Packed if sess.opts.debuginfo == DebugInfo::None => {}
+
+ // On macOS the external `dsymutil` tool is used to create the packed
+ // debug information. Note that this will read debug information from
+ // the objects on the filesystem which we'll clean up later.
+ SplitDebuginfo::Packed if sess.target.is_like_osx => {
+ let prog = Command::new("dsymutil").arg(out_filename).output();
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ sess.struct_warn(&format!(
+ "processing debug info with `dsymutil` failed: {}",
+ prog.status
+ ))
+ .note(&escape_string(&output))
+ .emit();
+ }
+ }
+ Err(e) => sess.fatal(&format!("unable to run `dsymutil`: {}", e)),
+ }
+ }
+
+ // On MSVC packed debug information is produced by the linker itself so
+ // there's no need to do anything else here.
+ SplitDebuginfo::Packed if sess.target.is_like_windows => {}
+
+ // ... and otherwise we're processing a `*.dwp` packed dwarf file.
+ //
+ // We cannot rely on the .o paths in the executable because they may have been
+ // remapped by --remap-path-prefix and therefore invalid, so we need to provide
+ // the .o/.dwo paths explicitly.
+ SplitDebuginfo::Packed => link_dwarf_object(sess, codegen_results, out_filename),
+ }
+
+ let strip = strip_value(sess);
+
+ if sess.target.is_like_osx {
+ match (strip, crate_type) {
+ (Strip::Debuginfo, _) => strip_symbols_in_osx(sess, &out_filename, Some("-S")),
+ // Per the manpage, `-x` is the maximum safe strip level for dynamic libraries. (#93988)
+ (Strip::Symbols, CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro) => {
+ strip_symbols_in_osx(sess, &out_filename, Some("-x"))
+ }
+ (Strip::Symbols, _) => strip_symbols_in_osx(sess, &out_filename, None),
+ (Strip::None, _) => {}
+ }
+ }
+
+ Ok(())
+}
+
+// Temporarily support both -Z strip and -C strip
+fn strip_value(sess: &Session) -> Strip {
+ match (sess.opts.unstable_opts.strip, sess.opts.cg.strip) {
+ (s, Strip::None) => s,
+ (_, s) => s,
+ }
+}
+
+fn strip_symbols_in_osx<'a>(sess: &'a Session, out_filename: &Path, option: Option<&str>) {
+ let mut cmd = Command::new("strip");
+ if let Some(option) = option {
+ cmd.arg(option);
+ }
+ let prog = cmd.arg(out_filename).output();
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ sess.struct_warn(&format!(
+ "stripping debug info with `strip` failed: {}",
+ prog.status
+ ))
+ .note(&escape_string(&output))
+ .emit();
+ }
+ }
+ Err(e) => sess.fatal(&format!("unable to run `strip`: {}", e)),
+ }
+}
+
+fn escape_string(s: &[u8]) -> String {
+ str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+ let mut x = "Non-UTF-8 output: ".to_string();
+ x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+ x
+ })
+}
+
+fn add_sanitizer_libraries(sess: &Session, crate_type: CrateType, linker: &mut dyn Linker) {
+ // On macOS the runtimes are distributed as dylibs which should be linked to
+ // both executables and dynamic shared objects. Everywhere else the runtimes
+ // are currently distributed as static libraries which should be linked to
+ // executables only.
+ let needs_runtime = match crate_type {
+ CrateType::Executable => true,
+ CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx,
+ CrateType::Rlib | CrateType::Staticlib => false,
+ };
+
+ if !needs_runtime {
+ return;
+ }
+
+ let sanitizer = sess.opts.unstable_opts.sanitizer;
+ if sanitizer.contains(SanitizerSet::ADDRESS) {
+ link_sanitizer_runtime(sess, linker, "asan");
+ }
+ if sanitizer.contains(SanitizerSet::LEAK) {
+ link_sanitizer_runtime(sess, linker, "lsan");
+ }
+ if sanitizer.contains(SanitizerSet::MEMORY) {
+ link_sanitizer_runtime(sess, linker, "msan");
+ }
+ if sanitizer.contains(SanitizerSet::THREAD) {
+ link_sanitizer_runtime(sess, linker, "tsan");
+ }
+ if sanitizer.contains(SanitizerSet::HWADDRESS) {
+ link_sanitizer_runtime(sess, linker, "hwasan");
+ }
+}
+
+fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
+ fn find_sanitizer_runtime(sess: &Session, filename: &str) -> PathBuf {
+ let session_tlib =
+ filesearch::make_target_lib_path(&sess.sysroot, sess.opts.target_triple.triple());
+ let path = session_tlib.join(filename);
+ if path.exists() {
+ return session_tlib;
+ } else {
+ let default_sysroot = filesearch::get_or_default_sysroot();
+ let default_tlib = filesearch::make_target_lib_path(
+ &default_sysroot,
+ sess.opts.target_triple.triple(),
+ );
+ return default_tlib;
+ }
+ }
+
+ let channel = option_env!("CFG_RELEASE_CHANNEL")
+ .map(|channel| format!("-{}", channel))
+ .unwrap_or_default();
+
+ if sess.target.is_like_osx {
+ // On Apple platforms, the sanitizer is always built as a dylib, and
+ // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+ // rpath to the library as well (the rpath should be absolute, see
+ // PR #41352 for details).
+ let filename = format!("rustc{}_rt.{}", channel, name);
+ let path = find_sanitizer_runtime(&sess, &filename);
+ let rpath = path.to_str().expect("non-utf8 component in path");
+ linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
+ linker.link_dylib(&filename, false, true);
+ } else {
+ let filename = format!("librustc{}_rt.{}.a", channel, name);
+ let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
+ linker.link_whole_rlib(&path);
+ }
+}
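
To make the filename construction above concrete, a worked example (assuming the nightly channel, so `channel` is "-nightly"):

    // AddressSanitizer (`name = "asan"`) on a non-Apple target:
    //   filename = "librustc-nightly_rt.asan.a"  (linked as a whole rlib)
    // On Apple targets the name passed to `link_dylib` is instead:
    //   filename = "rustc-nightly_rt.asan"       (resolved via the added rpath)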
+
+/// Returns a boolean indicating whether the specified crate should be ignored
+/// during LTO.
+///
+/// Crates ignored during LTO are not lumped together in the "massive object
+/// file" that we create and are linked in their normal rlib states. See
+/// comments below for what crates do not participate in LTO.
+///
+/// It's unusual for a crate to not participate in LTO. Typically only
+/// compiler-specific and unstable crates have a reason to not participate in
+/// LTO.
+pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool {
+ // If our target enables builtin function lowering in LLVM then the
+ // crates providing these functions don't participate in LTO (e.g.
+ // no_builtins or compiler builtins crates).
+ !sess.target.no_builtins
+ && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum))
+}
+
+// This function tries to determine the appropriate linker (and corresponding LinkerFlavor) to use
+pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
+ fn infer_from(
+ sess: &Session,
+ linker: Option<PathBuf>,
+ flavor: Option<LinkerFlavor>,
+ ) -> Option<(PathBuf, LinkerFlavor)> {
+ match (linker, flavor) {
+ (Some(linker), Some(flavor)) => Some((linker, flavor)),
+ // only the linker flavor is known; use the default linker for the selected flavor
+ (None, Some(flavor)) => Some((
+ PathBuf::from(match flavor {
+ LinkerFlavor::Em => {
+ if cfg!(windows) {
+ "emcc.bat"
+ } else {
+ "emcc"
+ }
+ }
+ LinkerFlavor::Gcc => {
+ if cfg!(any(target_os = "solaris", target_os = "illumos")) {
+ // On historical Solaris systems, "cc" may have
+ // been Sun Studio, which is not flag-compatible
+ // with "gcc". This history casts a long shadow,
+ // and many modern illumos distributions today
+ // ship GCC as "gcc" without also making it
+ // available as "cc".
+ "gcc"
+ } else {
+ "cc"
+ }
+ }
+ LinkerFlavor::Ld => "ld",
+ LinkerFlavor::Msvc => "link.exe",
+ LinkerFlavor::Lld(_) => "lld",
+ LinkerFlavor::PtxLinker => "rust-ptx-linker",
+ LinkerFlavor::BpfLinker => "bpf-linker",
+ LinkerFlavor::L4Bender => "l4-bender",
+ }),
+ flavor,
+ )),
+ (Some(linker), None) => {
+ let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
+ sess.fatal("couldn't extract file stem from specified linker")
+ });
+
+ let flavor = if stem == "emcc" {
+ LinkerFlavor::Em
+ } else if stem == "gcc"
+ || stem.ends_with("-gcc")
+ || stem == "clang"
+ || stem.ends_with("-clang")
+ {
+ LinkerFlavor::Gcc
+ } else if stem == "wasm-ld" || stem.ends_with("-wasm-ld") {
+ LinkerFlavor::Lld(LldFlavor::Wasm)
+ } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
+ LinkerFlavor::Ld
+ } else if stem == "link" || stem == "lld-link" {
+ LinkerFlavor::Msvc
+ } else if stem == "lld" || stem == "rust-lld" {
+ LinkerFlavor::Lld(sess.target.lld_flavor)
+ } else {
+ // fall back to the value in the target spec
+ sess.target.linker_flavor
+ };
+
+ Some((linker, flavor))
+ }
+ (None, None) => None,
+ }
+ }
+
+ // linker and linker flavor specified via command line have precedence over what the target
+ // specification specifies
+ if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+ return ret;
+ }
+
+ if let Some(ret) = infer_from(
+ sess,
+ sess.target.linker.as_deref().map(PathBuf::from),
+ Some(sess.target.linker_flavor),
+ ) {
+ return ret;
+ }
+
+ bug!("Not enough information provided to determine how to invoke the linker");
+}
+
+/// Returns a pair of booleans indicating whether we should preserve the object and
+/// dwarf object files on the filesystem for their debug information. This is often
+/// useful with split-dwarf like schemes.
+fn preserve_objects_for_their_debuginfo(sess: &Session) -> (bool, bool) {
+ // If the objects don't have debuginfo there's nothing to preserve.
+ if sess.opts.debuginfo == config::DebugInfo::None {
+ return (false, false);
+ }
+
+ // If we're only producing artifacts that are archives, no need to preserve
+ // the objects as they're losslessly contained inside the archives.
+ if sess.crate_types().iter().all(|&x| x.is_archive()) {
+ return (false, false);
+ }
+
+ match (sess.split_debuginfo(), sess.opts.unstable_opts.split_dwarf_kind) {
+ // If there is no split debuginfo then do not preserve objects.
+ (SplitDebuginfo::Off, _) => (false, false),
+ // If there is packed split debuginfo, then the debuginfo in the objects
+ // has been packaged and the objects can be deleted.
+ (SplitDebuginfo::Packed, _) => (false, false),
+ // If there is unpacked split debuginfo and the current target can not use
+ // split dwarf, then keep objects.
+ (SplitDebuginfo::Unpacked, _) if !sess.target_can_use_split_dwarf() => (true, false),
+ // If there is unpacked split debuginfo and the target can use split dwarf, then
+ // keep the object containing that debuginfo (whether that is an object file or
+ // dwarf object file depends on the split dwarf kind).
+ (SplitDebuginfo::Unpacked, SplitDwarfKind::Single) => (true, false),
+ (SplitDebuginfo::Unpacked, SplitDwarfKind::Split) => (false, true),
+ }
+}
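
Reading one arm of the match above as an example:

    // Unpacked split-debuginfo with -Zsplit-dwarf-kind=split returns
    // (false, true): the .o files may be deleted, but the .dwo files
    // carrying the debuginfo must be kept.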
+
+fn archive_search_paths(sess: &Session) -> Vec<PathBuf> {
+ sess.target_filesearch(PathKind::Native).search_path_dirs()
+}
+
+#[derive(PartialEq)]
+enum RlibFlavor {
+ Normal,
+ StaticlibBase,
+}
+
+fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
+ let lib_args: Vec<_> = all_native_libs
+ .iter()
+ .filter(|l| relevant_lib(sess, l))
+ .filter_map(|lib| {
+ let name = lib.name?;
+ match lib.kind {
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
+ | NativeLibKind::Unspecified => {
+ let verbatim = lib.verbatim.unwrap_or(false);
+ if sess.target.is_like_msvc {
+ Some(format!("{}{}", name, if verbatim { "" } else { ".lib" }))
+ } else if sess.target.linker_is_gnu {
+ Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
+ } else {
+ Some(format!("-l{}", name))
+ }
+ }
+ NativeLibKind::Framework { .. } => {
+ // ld-only syntax, since there are no frameworks in MSVC
+ Some(format!("-framework {}", name))
+ }
+ // These are included, no need to print them
+ NativeLibKind::Static { bundle: None | Some(true), .. }
+ | NativeLibKind::LinkArg
+ | NativeLibKind::RawDylib => None,
+ }
+ })
+ .collect();
+ if !lib_args.is_empty() {
+ sess.note_without_error(
+ "Link against the following native artifacts when linking \
+ against this static library. The order and any duplication \
+ can be significant on some platforms.",
+ );
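+        // For illustration, on a GNU-like target the note below might read
+        // (hypothetical): `note: native-static-libs: -lm -ldl -lpthread`.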
+ // Prefix for greppability
+ sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" ")));
+ }
+}
+
+fn get_object_file_path(sess: &Session, name: &str, self_contained: bool) -> PathBuf {
+ let fs = sess.target_filesearch(PathKind::Native);
+ let file_path = fs.get_lib_path().join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ // Special directory with objects used only in self-contained linkage mode
+ if self_contained {
+ let file_path = fs.get_self_contained_lib_path().join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ }
+ for search_path in fs.search_paths() {
+ let file_path = search_path.dir.join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ }
+ PathBuf::from(name)
+}
+
+fn exec_linker(
+ sess: &Session,
+ cmd: &Command,
+ out_filename: &Path,
+ tmpdir: &Path,
+) -> io::Result<Output> {
+ // When attempting to spawn the linker we run a risk of blowing out the
+ // size limits for spawning a new process with respect to the arguments
+ // we pass on the command line.
+ //
+ // Here we attempt to handle errors from the OS saying "your list of
+ // arguments is too big" by reinvoking the linker again with an `@`-file
+ // that contains all the arguments. The theory is that this is then
+ // accepted on all linkers and the linker will read all its options out of
+ // there instead of looking at the command line.
+ if !cmd.very_likely_to_exceed_some_spawn_limit() {
+ match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() {
+ Ok(child) => {
+ let output = child.wait_with_output();
+ flush_linked_file(&output, out_filename)?;
+ return output;
+ }
+ Err(ref e) if command_line_too_big(e) => {
+ info!("command line to linker was too big: {}", e);
+ }
+ Err(e) => return Err(e),
+ }
+ }
+
+ info!("falling back to passing arguments to linker via an @-file");
+ let mut cmd2 = cmd.clone();
+ let mut args = String::new();
+ for arg in cmd2.take_args() {
+ args.push_str(
+ &Escape { arg: arg.to_str().unwrap(), is_like_msvc: sess.target.is_like_msvc }
+ .to_string(),
+ );
+ args.push('\n');
+ }
+ let file = tmpdir.join("linker-arguments");
+ let bytes = if sess.target.is_like_msvc {
+ let mut out = Vec::with_capacity((1 + args.len()) * 2);
+ // start the stream with a UTF-16 BOM
+ for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) {
+ // encode in little endian
+ out.push(c as u8);
+ out.push((c >> 8) as u8);
+ }
+ out
+ } else {
+ args.into_bytes()
+ };
+ fs::write(&file, &bytes)?;
+ cmd2.arg(format!("@{}", file.display()));
+ info!("invoking linker {:?}", cmd2);
+ let output = cmd2.output();
+ flush_linked_file(&output, out_filename)?;
+ return output;
+
+ #[cfg(not(windows))]
+ fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ fn flush_linked_file(
+ command_output: &io::Result<Output>,
+ out_filename: &Path,
+ ) -> io::Result<()> {
+ // On Windows, under high I/O load, output buffers are sometimes not flushed,
+ // even long after process exit, causing nasty, non-reproducible output bugs.
+ //
+ // File::sync_all() calls FlushFileBuffers() down the line, which solves the problem.
+ //
+        // A full writeup of the original Chrome bug can be found at
+ // randomascii.wordpress.com/2018/02/25/compiler-bug-linker-bug-windows-kernel-bug/amp
+
+ if let &Ok(ref out) = command_output {
+ if out.status.success() {
+ if let Ok(of) = fs::OpenOptions::new().write(true).open(out_filename) {
+ of.sync_all()?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ #[cfg(unix)]
+ fn command_line_too_big(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(::libc::E2BIG)
+ }
+
+ #[cfg(windows)]
+ fn command_line_too_big(err: &io::Error) -> bool {
+ const ERROR_FILENAME_EXCED_RANGE: i32 = 206;
+ err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
+ }
+
+ #[cfg(not(any(unix, windows)))]
+ fn command_line_too_big(_: &io::Error) -> bool {
+ false
+ }
+
+ struct Escape<'a> {
+ arg: &'a str,
+ is_like_msvc: bool,
+ }
+
+ impl<'a> fmt::Display for Escape<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.is_like_msvc {
+ // This is "documented" at
+ // https://docs.microsoft.com/en-us/cpp/build/reference/at-specify-a-linker-response-file
+ //
+ // Unfortunately there's not a great specification of the
+ // syntax I could find online (at least) but some local
+ // testing showed that this seemed sufficient-ish to catch
+ // at least a few edge cases.
+ write!(f, "\"")?;
+ for c in self.arg.chars() {
+ match c {
+ '"' => write!(f, "\\{}", c)?,
+ c => write!(f, "{}", c)?,
+ }
+ }
+ write!(f, "\"")?;
+ } else {
+ // This is documented at https://linux.die.net/man/1/ld, namely:
+ //
+ // > Options in file are separated by whitespace. A whitespace
+ // > character may be included in an option by surrounding the
+ // > entire option in either single or double quotes. Any
+ // > character (including a backslash) may be included by
+ // > prefixing the character to be included with a backslash.
+ //
+ // We put an argument on each line, so all we need to do is
+ // ensure the line is interpreted as one whole argument.
+ for c in self.arg.chars() {
+ match c {
+ '\\' | ' ' => write!(f, "\\{}", c)?,
+ c => write!(f, "{}", c)?,
+ }
+ }
+ }
+ Ok(())
+ }
+ }
+}
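+
+// A minimal standalone sketch (not rustc internals) of the response-file rules
+// implemented in `exec_linker` above. The helper names here are hypothetical;
+// they only mirror the documented @-file syntax for GNU ld and MSVC link.exe,
+// and the UTF-16LE encoding used for MSVC @-files.
+#[cfg(test)]
+mod response_file_sketch {
+    // One argument per line; escape backslashes and spaces with a backslash.
+    fn escape_gnu(arg: &str) -> String {
+        arg.chars()
+            .flat_map(|c| match c {
+                '\\' | ' ' => vec!['\\', c],
+                c => vec![c],
+            })
+            .collect()
+    }
+
+    // Wrap the argument in double quotes and backslash-escape embedded quotes.
+    fn escape_msvc(arg: &str) -> String {
+        let mut out = String::from("\"");
+        for c in arg.chars() {
+            if c == '"' {
+                out.push('\\');
+            }
+            out.push(c);
+        }
+        out.push('"');
+        out
+    }
+
+    // A UTF-16 BOM followed by each code unit in little-endian byte order.
+    fn encode_utf16le_with_bom(args: &str) -> Vec<u8> {
+        let mut out = Vec::with_capacity((1 + args.len()) * 2);
+        for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) {
+            out.push(c as u8);
+            out.push((c >> 8) as u8);
+        }
+        out
+    }
+
+    #[test]
+    fn escapes_match_documented_syntax() {
+        assert_eq!(escape_gnu(r"C:\path with space"), r"C:\\path\ with\ space");
+        assert_eq!(escape_msvc(r#"say "hi""#), r#""say \"hi\"""#);
+        assert_eq!(&encode_utf16le_with_bom("a")[..4], &[0xFF, 0xFE, 0x61, 0x00]);
+    }
+}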
+
+fn link_output_kind(sess: &Session, crate_type: CrateType) -> LinkOutputKind {
+ let kind = match (crate_type, sess.crt_static(Some(crate_type)), sess.relocation_model()) {
+ (CrateType::Executable, _, _) if sess.is_wasi_reactor() => LinkOutputKind::WasiReactorExe,
+ (CrateType::Executable, false, RelocModel::Pic | RelocModel::Pie) => {
+ LinkOutputKind::DynamicPicExe
+ }
+ (CrateType::Executable, false, _) => LinkOutputKind::DynamicNoPicExe,
+ (CrateType::Executable, true, RelocModel::Pic | RelocModel::Pie) => {
+ LinkOutputKind::StaticPicExe
+ }
+ (CrateType::Executable, true, _) => LinkOutputKind::StaticNoPicExe,
+ (_, true, _) => LinkOutputKind::StaticDylib,
+ (_, false, _) => LinkOutputKind::DynamicDylib,
+ };
+
+ // Adjust the output kind to target capabilities.
+ let opts = &sess.target;
+ let pic_exe_supported = opts.position_independent_executables;
+ let static_pic_exe_supported = opts.static_position_independent_executables;
+ let static_dylib_supported = opts.crt_static_allows_dylibs;
+ match kind {
+ LinkOutputKind::DynamicPicExe if !pic_exe_supported => LinkOutputKind::DynamicNoPicExe,
+ LinkOutputKind::StaticPicExe if !static_pic_exe_supported => LinkOutputKind::StaticNoPicExe,
+ LinkOutputKind::StaticDylib if !static_dylib_supported => LinkOutputKind::DynamicDylib,
+ _ => kind,
+ }
+}
+
+// Returns true if the linker is located within the sysroot.
+fn detect_self_contained_mingw(sess: &Session) -> bool {
+ let (linker, _) = linker_and_flavor(&sess);
+    // Assume that `-C linker=rust-lld` implies self-contained mode.
+ if linker == Path::new("rust-lld") {
+ return true;
+ }
+ let linker_with_extension = if cfg!(windows) && linker.extension().is_none() {
+ linker.with_extension("exe")
+ } else {
+ linker
+ };
+ for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+ let full_path = dir.join(&linker_with_extension);
+        // If the linker is found outside the sysroot, assume it is not self-contained mode.
+ if full_path.is_file() && !full_path.starts_with(&sess.sysroot) {
+ return false;
+ }
+ }
+ true
+}
+
+/// Whether we link to our own CRT objects instead of relying on gcc to pull them in.
+/// We only provide such support for a very limited number of targets.
+fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool {
+ if let Some(self_contained) = sess.opts.cg.link_self_contained {
+ return self_contained;
+ }
+
+ match sess.target.crt_objects_fallback {
+ // FIXME: Find a better heuristic for "native musl toolchain is available",
+ // based on host and linker path, for example.
+ // (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237).
+ Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)),
+ Some(CrtObjectsFallback::Mingw) => {
+ sess.host == sess.target
+ && sess.target.vendor != "uwp"
+ && detect_self_contained_mingw(&sess)
+ }
+ // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+ Some(CrtObjectsFallback::Wasm) => true,
+ None => false,
+ }
+}
+
+/// Add pre-link object files defined by the target spec.
+fn add_pre_link_objects(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ self_contained: bool,
+) {
+ let opts = &sess.target;
+ let objects =
+ if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects };
+ for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+ cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+ }
+}
+
+/// Add post-link object files defined by the target spec.
+fn add_post_link_objects(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ self_contained: bool,
+) {
+ let opts = &sess.target;
+ let objects =
+ if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects };
+ for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+ cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+ }
+}
+
+/// Add arbitrary "pre-link" args defined by the target spec or from command line.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(args) = sess.target.pre_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ cmd.args(&sess.opts.unstable_opts.pre_link_args);
+}
+
+/// Add a link script embedded in the target, if applicable.
+fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
+ match (crate_type, &sess.target.link_script) {
+ (CrateType::Cdylib | CrateType::Executable, Some(script)) => {
+ if !sess.target.linker_is_gnu {
+ sess.fatal("can only use link script when linking with GNU-like linker");
+ }
+
+ let file_name = ["rustc", &sess.target.llvm_target, "linkfile.ld"].join("-");
+
+ let path = tmpdir.join(file_name);
+ if let Err(e) = fs::write(&path, script.as_ref()) {
+ sess.fatal(&format!("failed to write link script to {}: {}", path.display(), e));
+ }
+
+ cmd.arg("--script");
+ cmd.arg(path);
+ }
+ _ => {}
+ }
+}
+
+/// Add arbitrary "user defined" args defined from command line.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_user_defined_link_args(cmd: &mut dyn Linker, sess: &Session) {
+ cmd.args(&sess.opts.cg.link_args);
+}
+
+/// Add arbitrary "late link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_late_link_args(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ flavor: LinkerFlavor,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+) {
+ let any_dynamic_crate = crate_type == CrateType::Dylib
+ || codegen_results.crate_info.dependency_formats.iter().any(|(ty, list)| {
+ *ty == crate_type && list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ if let Some(args) = sess.target.late_link_args_dynamic.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ } else {
+ if let Some(args) = sess.target.late_link_args_static.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ }
+ if let Some(args) = sess.target.late_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+}
+
+/// Add arbitrary "post-link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(args) = sess.target.post_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+}
+
+/// Add a synthetic object file that contains reference to all symbols that we want to expose to
+/// the linker.
+///
+/// Background: we implement rlibs as static libraries (archives). Linkers treat archives
+/// differently from object files: all object files participate in linking, while archives will
+/// only participate in linking if they can satisfy at least one undefined reference (version
+/// scripts don't count). This causes `#[no_mangle]` or `#[used]` items to be ignored by the
+/// linker, and since they never participate in the linking, using `KEEP` in linker scripts
+/// can't keep them either. This causes #47384.
+///
+/// To keep them around, we could use `--whole-archive` and equivalents to force rlibs to
+/// participate in linking like object files, but this proves to be expensive (#93791). Therefore
+/// we instead just introduce an undefined reference to them. This could be done with the `-u`
+/// command line option to the linker or `EXTERN(...)` in linker scripts; however, these do not
+/// only introduce an undefined reference, but also make the symbols GC roots, preventing
+/// `--gc-sections` from removing them. This is especially problematic for embedded programming,
+/// where every byte counts.
+///
+/// This method creates a synthetic object file, which contains undefined references to all
+/// symbols that are necessary for linking. They are only present in the symbol table and not
+/// actually used in any sections, so the linker will pick the relevant rlibs for linking, but
+/// unused `#[no_mangle]` or `#[used]` items can still be discarded by `--gc-sections`.
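+///
+/// For example (hypothetical), an rlib whose only export is
+/// `#[no_mangle] pub extern "C" fn callback()` would never be pulled out of the archive by the
+/// linker on its own; an undefined reference to `callback` in the synthetic object forces the
+/// relevant archive member to participate in linking, while `--gc-sections` remains free to
+/// drop the symbol if it ends up unused.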
+fn add_linked_symbol_object(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ tmpdir: &Path,
+ symbols: &[(String, SymbolExportKind)],
+) {
+ if symbols.is_empty() {
+ return;
+ }
+
+ let Some(mut file) = super::metadata::create_object_file(sess) else {
+ return;
+ };
+
+ // NOTE(nbdd0121): MSVC will hang if the input object file contains no sections,
+ // so add an empty section.
+ if file.format() == object::BinaryFormat::Coff {
+ file.add_section(Vec::new(), ".text".into(), object::SectionKind::Text);
+
+        // We handle the name decoration of COFF targets in `symbol_export.rs`, so disable the
+        // default mangler in the `object` crate.
+ file.set_mangling(object::write::Mangling::None);
+
+ // Add feature flags to the object file. On MSVC this is optional but LLD will complain if
+ // not present.
+ let mut feature = 0;
+
+ if file.architecture() == object::Architecture::I386 {
+            // Indicate that all SEH handlers are registered in the .sxdata section.
+            // We don't generate any code, so we don't need a .sxdata section, but LLD
+            // still expects us to set this bit (see #96498).
+ // Reference: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+ feature |= 1;
+ }
+
+ file.add_symbol(object::write::Symbol {
+ name: "@feat.00".into(),
+ value: feature,
+ size: 0,
+ kind: object::SymbolKind::Data,
+ scope: object::SymbolScope::Compilation,
+ weak: false,
+ section: object::write::SymbolSection::Absolute,
+ flags: object::SymbolFlags::None,
+ });
+ }
+
+ for (sym, kind) in symbols.iter() {
+ file.add_symbol(object::write::Symbol {
+ name: sym.clone().into(),
+ value: 0,
+ size: 0,
+ kind: match kind {
+ SymbolExportKind::Text => object::SymbolKind::Text,
+ SymbolExportKind::Data => object::SymbolKind::Data,
+ SymbolExportKind::Tls => object::SymbolKind::Tls,
+ },
+ scope: object::SymbolScope::Unknown,
+ weak: false,
+ section: object::write::SymbolSection::Undefined,
+ flags: object::SymbolFlags::None,
+ });
+ }
+
+ let path = tmpdir.join("symbols.o");
+ let result = std::fs::write(&path, file.write().unwrap());
+ if let Err(e) = result {
+ sess.fatal(&format!("failed to write {}: {}", path.display(), e));
+ }
+ cmd.add_object(&path);
+}
+
+/// Add object files containing code from the current crate.
+fn add_local_crate_regular_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+ for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ cmd.add_object(obj);
+ }
+}
+
+/// Add object files for allocator code linked once for the whole crate tree.
+fn add_local_crate_allocator_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+ if let Some(obj) = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref()) {
+ cmd.add_object(obj);
+ }
+}
+
+/// Add object files containing metadata for the current crate.
+fn add_local_crate_metadata_objects(
+ cmd: &mut dyn Linker,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+) {
+ // When linking a dynamic library, we put the metadata into a section of the
+ // executable. This metadata is in a separate object file from the main
+ // object file, so we link that in here.
+ if crate_type == CrateType::Dylib || crate_type == CrateType::ProcMacro {
+ if let Some(obj) = codegen_results.metadata_module.as_ref().and_then(|m| m.object.as_ref())
+ {
+ cmd.add_object(obj);
+ }
+ }
+}
+
+/// Add sysroot and other globally set directories to the directory search list.
+fn add_library_search_dirs(cmd: &mut dyn Linker, sess: &Session, self_contained: bool) {
+    // The default library location; we need this to find the runtime.
+ // The location of crates will be determined as needed.
+ let lib_path = sess.target_filesearch(PathKind::All).get_lib_path();
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+
+ // Special directory with libraries used only in self-contained linkage mode
+ if self_contained {
+ let lib_path = sess.target_filesearch(PathKind::All).get_self_contained_lib_path();
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+ }
+}
+
+/// Add options making relocation sections in the produced ELF files read-only
+/// and suppressing lazy binding.
+fn add_relro_args(cmd: &mut dyn Linker, sess: &Session) {
+ match sess.opts.unstable_opts.relro_level.unwrap_or(sess.target.relro_level) {
+ RelroLevel::Full => cmd.full_relro(),
+ RelroLevel::Partial => cmd.partial_relro(),
+ RelroLevel::Off => cmd.no_relro(),
+ RelroLevel::None => {}
+ }
+}
+
+/// Add library search paths used at runtime by dynamic linkers.
+fn add_rpath_args(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+) {
+ // FIXME (#2397): At some point we want to rpath our guesses as to
+ // where extern libraries might live, based on the
+ // add_lib_search_paths
+ if sess.opts.cg.rpath {
+ let libs = codegen_results
+ .crate_info
+ .used_crates
+ .iter()
+ .filter_map(|cnum| {
+ codegen_results.crate_info.used_crate_source[cnum]
+ .dylib
+ .as_ref()
+ .map(|(path, _)| &**path)
+ })
+ .collect::<Vec<_>>();
+ let mut rpath_config = RPathConfig {
+ libs: &*libs,
+ out_filename: out_filename.to_path_buf(),
+ has_rpath: sess.target.has_rpath,
+ is_like_osx: sess.target.is_like_osx,
+ linker_is_gnu: sess.target.linker_is_gnu,
+ };
+ cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
+ }
+}
+
+/// Produce the linker command line containing linker path and arguments.
+///
+/// When comments in the function say "order-(in)dependent" they mean order-dependence between
+/// options and libraries/object files. For example `--whole-archive` (order-dependent) applies
+/// to specific libraries passed after it, and `-o` (output file, order-independent) applies
+/// to the linking process as a whole.
+/// Order-independent options may still override each other in order-dependent fashion,
+/// e.g `--foo=yes --foo=no` may be equivalent to `--foo=no`.
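+///
+/// For illustration, a (hypothetical, abbreviated) GNU-style command line produced here is
+/// shaped like: `cc <pre-link args> <pre-link CRT objects> symbols.o crate.o metadata.o
+/// allocator.o -Llocal/paths -llocal_native libupstream.rlib -lupstream_native
+/// <order-independent options> <user link-args> <post-link CRT objects> <post-link args>`.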
+fn linker_with_args<'a>(
+ path: &Path,
+ flavor: LinkerFlavor,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ crate_type: CrateType,
+ tmpdir: &Path,
+ out_filename: &Path,
+ codegen_results: &CodegenResults,
+) -> Result<Command, ErrorGuaranteed> {
+ let crt_objects_fallback = crt_objects_fallback(sess, crate_type);
+ let cmd = &mut *super::linker::get_linker(
+ sess,
+ path,
+ flavor,
+ crt_objects_fallback,
+ &codegen_results.crate_info.target_cpu,
+ );
+ let link_output_kind = link_output_kind(sess, crate_type);
+
+ // ------------ Early order-dependent options ------------
+
+ // If we're building something like a dynamic library then some platforms
+ // need to make sure that all symbols are exported correctly from the
+ // dynamic library.
+    // Must be passed before any libraries to prevent the symbols we want to export from being thrown away,
+ // at least on some platforms (e.g. windows-gnu).
+ cmd.export_symbols(
+ tmpdir,
+ crate_type,
+ &codegen_results.crate_info.exported_symbols[&crate_type],
+ );
+
+ // Can be used for adding custom CRT objects or overriding order-dependent options above.
+ // FIXME: In practice built-in target specs use this for arbitrary order-independent options,
+ // introduce a target spec option for order-independent linker options and migrate built-in
+ // specs to it.
+ add_pre_link_args(cmd, sess, flavor);
+
+ // ------------ Object code and libraries, order-dependent ------------
+
+ // Pre-link CRT objects.
+ add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+ add_linked_symbol_object(
+ cmd,
+ sess,
+ tmpdir,
+ &codegen_results.crate_info.linked_symbols[&crate_type],
+ );
+
+ // Sanitizer libraries.
+ add_sanitizer_libraries(sess, crate_type, cmd);
+
+ // Object code from the current crate.
+ // Take careful note of the ordering of the arguments we pass to the linker
+ // here. Linkers will assume that things on the left depend on things to the
+ // right. Things on the right cannot depend on things on the left. This is
+ // all formally implemented in terms of resolving symbols (libs on the right
+ // resolve unknown symbols of libs on the left, but not vice versa).
+ //
+ // For this reason, we have organized the arguments we pass to the linker as
+ // such:
+ //
+ // 1. The local object that LLVM just generated
+ // 2. Local native libraries
+ // 3. Upstream rust libraries
+ // 4. Upstream native libraries
+ //
+ // The rationale behind this ordering is that those items lower down in the
+ // list can't depend on items higher up in the list. For example nothing can
+ // depend on what we just generated (e.g., that'd be a circular dependency).
+ // Upstream rust libraries are not supposed to depend on our local native
+ // libraries as that would violate the structure of the DAG, in that
+ // scenario they are required to link to them as well in a shared fashion.
+ // (The current implementation still doesn't prevent it though, see the FIXME below.)
+ //
+ // Note that upstream rust libraries may contain native dependencies as
+ // well, but they also can't depend on what we just started to add to the
+ // link line. And finally upstream native libraries can't depend on anything
+ // in this DAG so far because they can only depend on other native libraries
+ // and such dependencies are also required to be specified.
+ add_local_crate_regular_objects(cmd, codegen_results);
+ add_local_crate_metadata_objects(cmd, crate_type, codegen_results);
+ add_local_crate_allocator_objects(cmd, codegen_results);
+
+ // Avoid linking to dynamic libraries unless they satisfy some undefined symbols
+ // at the point at which they are specified on the command line.
+ // Must be passed before any (dynamic) libraries to have effect on them.
+ // On Solaris-like systems, `-z ignore` acts as both `--as-needed` and `--gc-sections`
+ // so it will ignore unreferenced ELF sections from relocatable objects.
+ // For that reason, we put this flag after metadata objects as they would otherwise be removed.
+ // FIXME: Support more fine-grained dead code removal on Solaris/illumos
+ // and move this option back to the top.
+ cmd.add_as_needed();
+
+ // FIXME: Move this below to other native libraries
+ // (or alternatively link all native libraries after their respective crates).
+ // This change is somewhat breaking in practice due to local static libraries being linked
+ // as whole-archive (#85144), so removing whole-archive may be a pre-requisite.
+ if sess.opts.unstable_opts.link_native_libraries {
+ add_local_native_libraries(cmd, sess, codegen_results);
+ }
+
+ // Upstream rust libraries and their non-bundled static libraries
+ add_upstream_rust_crates(
+ cmd,
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ crate_type,
+ tmpdir,
+ );
+
+    // Upstream dynamic native libraries linked with `#[link]` attributes and `-l`
+ // command line options.
+ // If -Zlink-native-libraries=false is set, then the assumption is that an
+ // external build system already has the native dependencies defined, and it
+ // will provide them to the linker itself.
+ if sess.opts.unstable_opts.link_native_libraries {
+ add_upstream_native_libraries(cmd, sess, codegen_results);
+ }
+
+ // Link with the import library generated for any raw-dylib functions.
+ for (raw_dylib_name, raw_dylib_imports) in
+ collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ {
+ cmd.add_object(&archive_builder_builder.create_dll_import_lib(
+ sess,
+ &raw_dylib_name,
+ &raw_dylib_imports,
+ tmpdir,
+ ));
+ }
+
+    // Library linking above uses some global state for things like `-Bstatic`/`-Bdynamic` to make
+    // the command line shorter; reset it to the default here before adding more libraries.
+ cmd.reset_per_library_state();
+
+ // FIXME: Built-in target specs occasionally use this for linking system libraries,
+ // eliminate all such uses by migrating them to `#[link]` attributes in `lib(std,c,unwind)`
+ // and remove the option.
+ add_late_link_args(cmd, sess, flavor, crate_type, codegen_results);
+
+ // ------------ Arbitrary order-independent options ------------
+
+ // Add order-independent options determined by rustc from its compiler options,
+ // target properties and source code.
+ add_order_independent_options(
+ cmd,
+ sess,
+ link_output_kind,
+ crt_objects_fallback,
+ flavor,
+ crate_type,
+ codegen_results,
+ out_filename,
+ tmpdir,
+ );
+
+ // Can be used for arbitrary order-independent options.
+ // In practice may also be occasionally used for linking native libraries.
+ // Passed after compiler-generated options to support manual overriding when necessary.
+ add_user_defined_link_args(cmd, sess);
+
+ // ------------ Object code and libraries, order-dependent ------------
+
+ // Post-link CRT objects.
+ add_post_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+ // ------------ Late order-dependent options ------------
+
+ // Doesn't really make sense.
+ // FIXME: In practice built-in target specs use this for arbitrary order-independent options,
+ // introduce a target spec option for order-independent linker options, migrate built-in specs
+ // to it and remove the option.
+ add_post_link_args(cmd, sess, flavor);
+
+ Ok(cmd.take_cmd())
+}
+
+fn add_order_independent_options(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ crt_objects_fallback: bool,
+ flavor: LinkerFlavor,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+ tmpdir: &Path,
+) {
+ add_gcc_ld_path(cmd, sess, flavor);
+
+ add_apple_sdk(cmd, sess, flavor);
+
+ add_link_script(cmd, sess, tmpdir, crate_type);
+
+ if sess.target.os == "fuchsia" && crate_type == CrateType::Executable {
+ let prefix = if sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::ADDRESS) {
+ "asan/"
+ } else {
+ ""
+ };
+ cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
+ }
+
+ if sess.target.eh_frame_header {
+ cmd.add_eh_frame_header();
+ }
+
+ // Make the binary compatible with data execution prevention schemes.
+ cmd.add_no_exec();
+
+ if crt_objects_fallback {
+ cmd.no_crt_objects();
+ }
+
+ if sess.target.os == "emscripten" {
+ cmd.arg("-s");
+ cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort {
+ "DISABLE_EXCEPTION_CATCHING=1"
+ } else {
+ "DISABLE_EXCEPTION_CATCHING=0"
+ });
+ }
+
+ if flavor == LinkerFlavor::PtxLinker {
+ // Provide the linker with fallback to internal `target-cpu`.
+ cmd.arg("--fallback-arch");
+ cmd.arg(&codegen_results.crate_info.target_cpu);
+ } else if flavor == LinkerFlavor::BpfLinker {
+ cmd.arg("--cpu");
+ cmd.arg(&codegen_results.crate_info.target_cpu);
+ cmd.arg("--cpu-features");
+ cmd.arg(match &sess.opts.cg.target_feature {
+ feat if !feat.is_empty() => feat.as_ref(),
+ _ => sess.target.options.features.as_ref(),
+ });
+ }
+
+ cmd.linker_plugin_lto();
+
+ add_library_search_dirs(cmd, sess, crt_objects_fallback);
+
+ cmd.output_filename(out_filename);
+
+ if crate_type == CrateType::Executable && sess.target.is_like_windows {
+ if let Some(ref s) = codegen_results.crate_info.windows_subsystem {
+ cmd.subsystem(s);
+ }
+ }
+
+    // Try to strip as much out of the generated object as possible by removing
+    // unused sections. See more comments in linker.rs.
+ if !sess.link_dead_code() {
+        // If PGO is enabled, gc_sections will sometimes remove the profile data section
+        // as it appears to be unused. This can then cause the PGO profile file to lose
+        // some functions. If we are generating a profile, we shouldn't strip those
+        // metadata sections, to ensure we have all the data for PGO.
+ let keep_metadata =
+ crate_type == CrateType::Dylib || sess.opts.cg.profile_generate.enabled();
+ if crate_type != CrateType::Executable || !sess.opts.unstable_opts.export_executable_symbols
+ {
+ cmd.gc_sections(keep_metadata);
+ } else {
+ cmd.no_gc_sections();
+ }
+ }
+
+ cmd.set_output_kind(link_output_kind, out_filename);
+
+ add_relro_args(cmd, sess);
+
+ // Pass optimization flags down to the linker.
+ cmd.optimize();
+
+ // Gather the set of NatVis files, if any, and write them out to a temp directory.
+ let natvis_visualizers = collect_natvis_visualizers(
+ tmpdir,
+ sess,
+ &codegen_results.crate_info.local_crate_name,
+ &codegen_results.crate_info.natvis_debugger_visualizers,
+ );
+
+ // Pass debuginfo, NatVis debugger visualizers and strip flags down to the linker.
+ cmd.debuginfo(strip_value(sess), &natvis_visualizers);
+
+ // We want to prevent the compiler from accidentally leaking in any system libraries,
+ // so by default we tell linkers not to link to any default libraries.
+ if !sess.opts.cg.default_linker_libraries && sess.target.no_default_libraries {
+ cmd.no_default_libraries();
+ }
+
+ if sess.opts.cg.profile_generate.enabled() || sess.instrument_coverage() {
+ cmd.pgo_gen();
+ }
+
+ if sess.opts.cg.control_flow_guard != CFGuard::Disabled {
+ cmd.control_flow_guard();
+ }
+
+ add_rpath_args(cmd, sess, codegen_results, out_filename);
+}
+
+// Write the NatVis debugger visualizer files for each crate to the temp directory and gather the file paths.
+fn collect_natvis_visualizers(
+ tmpdir: &Path,
+ sess: &Session,
+ crate_name: &Symbol,
+ natvis_debugger_visualizers: &BTreeSet<DebuggerVisualizerFile>,
+) -> Vec<PathBuf> {
+ let mut visualizer_paths = Vec::with_capacity(natvis_debugger_visualizers.len());
+
+ for (index, visualizer) in natvis_debugger_visualizers.iter().enumerate() {
+ let visualizer_out_file = tmpdir.join(format!("{}-{}.natvis", crate_name.as_str(), index));
+
+ match fs::write(&visualizer_out_file, &visualizer.src) {
+ Ok(()) => {
+ visualizer_paths.push(visualizer_out_file);
+ }
+ Err(error) => {
+ sess.warn(
+ format!(
+                        "unable to write debugger visualizer file `{}`: {}",
+ visualizer_out_file.display(),
+ error
+ )
+ .as_str(),
+ );
+ }
+ };
+ }
+ visualizer_paths
+}
+
+/// # Native library linking
+///
+/// User-supplied library search paths (-L on the command line). These are the same paths used to
+/// find Rust crates, so some of them may have been added already by the previous crate linking
+/// code. This only allows them to be found at compile time, so it is still entirely up to
+/// outside forces to make sure that the library can be found at runtime.
+///
+/// Also note that the native libraries linked here are only the ones located in the current crate.
+/// Upstream crates with native library dependencies may have their native library pulled in above.
+fn add_local_native_libraries(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+) {
+ let filesearch = sess.target_filesearch(PathKind::All);
+ for search_path in filesearch.search_paths() {
+ match search_path.kind {
+ PathKind::Framework => {
+ cmd.framework_path(&search_path.dir);
+ }
+ _ => {
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&search_path.dir));
+ }
+ }
+ }
+
+ let relevant_libs =
+ codegen_results.crate_info.used_libraries.iter().filter(|l| relevant_lib(sess, l));
+
+ let search_path = OnceCell::new();
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for lib in relevant_libs {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ let verbatim = lib.verbatim.unwrap_or(false);
+ match lib.kind {
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Static { whole_archive, bundle, .. } => {
+ if whole_archive == Some(true)
+                    // Backward compatibility case: this can be an rlib (so `+whole-archive` cannot
+ // be added explicitly if necessary, see the error in `fn link_rlib`) compiled
+ // as an executable due to `--test`. Use whole-archive implicitly, like before
+ // the introduction of native lib modifiers.
+ || (whole_archive == None && bundle != Some(false) && sess.opts.test)
+ {
+ cmd.link_whole_staticlib(
+ name,
+ verbatim,
+ &search_path.get_or_init(|| archive_search_paths(sess)),
+ );
+ } else {
+ cmd.link_staticlib(name, verbatim)
+ }
+ }
+ NativeLibKind::RawDylib => {
+ // Ignore RawDylib here, they are handled separately in linker_with_args().
+ }
+ NativeLibKind::LinkArg => {
+ cmd.arg(name);
+ }
+ }
+ }
+}
+
+/// # Linking Rust crates and their non-bundled static libraries
+///
+/// Rust crates are not considered at all when creating an rlib output. All dependencies will be
+/// linked when producing the final output (instead of the intermediate rlib version).
+fn add_upstream_rust_crates<'a>(
+ cmd: &mut dyn Linker,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ crate_type: CrateType,
+ tmpdir: &Path,
+) {
+ // All of the heavy lifting has previously been accomplished by the
+ // dependency_format module of the compiler. This is just crawling the
+ // output of that module, adding crates as necessary.
+ //
+    // Linking to an rlib involves just passing it to the linker (the linker
+ // will slurp up the object files inside), and linking to a dynamic library
+ // involves just passing the right -l flag.
+
+ let (_, data) = codegen_results
+ .crate_info
+ .dependency_formats
+ .iter()
+ .find(|(ty, _)| *ty == crate_type)
+ .expect("failed to find crate type in dependency format list");
+
+ // Invoke get_used_crates to ensure that we get a topological sorting of
+ // crates.
+ let deps = &codegen_results.crate_info.used_crates;
+
+    // There are a few internal crates in the standard library (aka libcore and
+ // libstd) which actually have a circular dependence upon one another. This
+ // currently arises through "weak lang items" where libcore requires things
+ // like `rust_begin_unwind` but libstd ends up defining it. To get this
+ // circular dependence to work correctly in all situations we'll need to be
+ // sure to correctly apply the `--start-group` and `--end-group` options to
+ // GNU linkers, otherwise if we don't use any other symbol from the standard
+ // library it'll get discarded and the whole application won't link.
+ //
+ // In this loop we're calculating the `group_end`, after which crate to
+ // pass `--end-group` and `group_start`, before which crate to pass
+ // `--start-group`. We currently do this by passing `--end-group` after
+ // the first crate (when iterating backwards) that requires a lang item
+ // defined somewhere else. Once that's set then when we've defined all the
+ // necessary lang items we'll pass `--start-group`.
+ //
+ // Note that this isn't amazing logic for now but it should do the trick
+ // for the current implementation of the standard library.
+ let mut group_end = None;
+ let mut group_start = None;
+ // Crates available for linking thus far.
+ let mut available = FxHashSet::default();
+ // Crates required to satisfy dependencies discovered so far.
+ let mut required = FxHashSet::default();
+
+ let info = &codegen_results.crate_info;
+ for &cnum in deps.iter().rev() {
+ if let Some(missing) = info.missing_lang_items.get(&cnum) {
+ let missing_crates = missing.iter().map(|i| info.lang_item_to_crate.get(i).copied());
+ required.extend(missing_crates);
+ }
+
+ required.insert(Some(cnum));
+ available.insert(Some(cnum));
+
+ if required.len() > available.len() && group_end.is_none() {
+ group_end = Some(cnum);
+ }
+ if required.len() == available.len() && group_end.is_some() {
+ group_start = Some(cnum);
+ break;
+ }
+ }
+
+ // If we didn't end up filling in all lang items from upstream crates then
+ // we'll be filling it in with our crate. This probably means we're the
+ // standard library itself, so skip this for now.
+ if group_end.is_some() && group_start.is_none() {
+ group_end = None;
+ }
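+
+    // For illustration, when grouping is needed a GNU linker invocation ends up
+    // shaped like (hypothetical): `--start-group libstd-<hash>.rlib
+    // libcore-<hash>.rlib --end-group`, letting the two crates resolve each
+    // other's symbols regardless of order.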
+
+ let mut compiler_builtins = None;
+ let search_path = OnceCell::new();
+
+ for &cnum in deps.iter() {
+ if group_start == Some(cnum) {
+ cmd.group_start();
+ }
+
+ // We may not pass all crates through to the linker. Some crates may
+ // appear statically in an existing dylib, meaning we'll pick up all the
+ // symbols from the dylib.
+ let src = &codegen_results.crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ }
+ // compiler-builtins are always placed last to ensure that they're
+ // linked correctly.
+ _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => {
+ assert!(compiler_builtins.is_none());
+ compiler_builtins = Some(cnum);
+ }
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+
+ // Link static native libs with "-bundle" modifier only if the crate they originate from
+ // is being linked statically to the current crate. If it's linked dynamically
+ // or is an rlib already included via some other dylib crate, the symbols from
+ // native libs will have already been included in that dylib.
+ //
+ // If -Zlink-native-libraries=false is set, then the assumption is that an
+ // external build system already has the native dependencies defined, and it
+ // will provide them to the linker itself.
+ if sess.opts.unstable_opts.link_native_libraries {
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for lib in &codegen_results.crate_info.native_libraries[&cnum] {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+ if !relevant_lib(sess, lib) {
+ continue;
+ }
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ match lib.kind {
+ NativeLibKind::Static {
+ bundle: Some(false),
+ whole_archive: Some(true),
+ } => {
+ cmd.link_whole_staticlib(
+ name,
+ lib.verbatim.unwrap_or(false),
+ search_path.get_or_init(|| archive_search_paths(sess)),
+ );
+ }
+ NativeLibKind::Static {
+ bundle: Some(false),
+ whole_archive: Some(false) | None,
+ } => {
+ cmd.link_staticlib(name, lib.verbatim.unwrap_or(false));
+ }
+ NativeLibKind::LinkArg => {
+ cmd.arg(name);
+ }
+ NativeLibKind::Dylib { .. }
+ | NativeLibKind::Framework { .. }
+ | NativeLibKind::Unspecified
+ | NativeLibKind::RawDylib => {}
+ NativeLibKind::Static {
+ bundle: Some(true) | None,
+ whole_archive: _,
+ } => {}
+ }
+ }
+ }
+ }
+ Linkage::Dynamic => add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0),
+ }
+
+ if group_end == Some(cnum) {
+ cmd.group_end();
+ }
+ }
+
+ // compiler-builtins are always placed last to ensure that they're
+ // linked correctly.
+    // We must always link the `compiler_builtins` crate statically, even if it
+    // was already "included" in a dylib (e.g., `libstd` when `-C prefer-dynamic`
+    // is used).
+ if let Some(cnum) = compiler_builtins {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ }
+
+ // Converts a library file-stem into a cc -l argument
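+    // (e.g. the stem "libfoo" becomes "foo", to be passed as "-lfoo"; on
+    // Windows-like targets the stem is kept verbatim)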
+ fn unlib<'a>(target: &Target, stem: &'a str) -> &'a str {
+ if stem.starts_with("lib") && !target.is_like_windows { &stem[3..] } else { stem }
+ }
+
+ // Adds the static "rlib" versions of all crates to the command line.
+ // There's a bit of magic which happens here specifically related to LTO,
+ // namely that we remove upstream object files.
+ //
+ // When performing LTO, almost(*) all of the bytecode from the upstream
+ // libraries has already been included in our object file output. As a
+ // result we need to remove the object files in the upstream libraries so
+ // the linker doesn't try to include them twice (or whine about duplicate
+ // symbols). We must continue to include the rest of the rlib, however, as
+ // it may contain static native libraries which must be linked in.
+ //
+ // (*) Crates marked with `#![no_builtins]` don't participate in LTO and
+    // their bytecode hasn't been included. The object files in those libraries must
+ // still be passed to the linker.
+ //
+ // Note, however, that if we're not doing LTO we can just pass the rlib
+ // blindly to the linker (fast) because it's fine if it's not actually
+ // included as we're at the end of the dependency chain.
+ fn add_static_crate<'a>(
+ cmd: &mut dyn Linker,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ tmpdir: &Path,
+ cnum: CrateNum,
+ ) {
+ let src = &codegen_results.crate_info.used_crate_source[&cnum];
+ let cratepath = &src.rlib.as_ref().unwrap().0;
+
+ let mut link_upstream = |path: &Path| {
+ cmd.link_rlib(&fix_windows_verbatim_for_gcc(path));
+ };
+
+        // See the comment above in `link_staticlib` and `link_rlib` for why we
+        // skip all object files when there's a static library that's not
+        // relevant.
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+ let skip_native = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
+
+ if (!are_upstream_rust_objects_already_included(sess)
+ || ignored_for_lto(sess, &codegen_results.crate_info, cnum))
+ && !skip_native
+ {
+ link_upstream(cratepath);
+ return;
+ }
+
+ let dst = tmpdir.join(cratepath.file_name().unwrap());
+ let name = cratepath.file_name().unwrap().to_str().unwrap();
+ let name = &name[3..name.len() - 5]; // chop off lib/.rlib
+
+ sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| {
+ let canonical_name = name.replace('-', "_");
+ let upstream_rust_objects_already_included =
+ are_upstream_rust_objects_already_included(sess);
+ let is_builtins = sess.target.no_builtins
+ || !codegen_results.crate_info.is_no_builtins.contains(&cnum);
+
+ let mut archive = archive_builder_builder.new_archive_builder(sess);
+ if let Err(e) = archive.add_archive(
+ cratepath,
+ Box::new(move |f| {
+ if f == METADATA_FILENAME {
+ return true;
+ }
+
+ let canonical = f.replace('-', "_");
+
+ let is_rust_object =
+ canonical.starts_with(&canonical_name) && looks_like_rust_object_file(&f);
+
+ // If we've been requested to skip all native object files
+ // (those not generated by the rust compiler) then we can skip
+ // this file. See above for why we may want to do this.
+ let skip_because_cfg_say_so = skip_native && !is_rust_object;
+
+ // If we're performing LTO and this is a rust-generated object
+ // file, then we don't need the object file as it's part of the
+ // LTO module. Note that `#![no_builtins]` is excluded from LTO,
+ // though, so we let that object file slide.
+ let skip_because_lto =
+ upstream_rust_objects_already_included && is_rust_object && is_builtins;
+
+ if skip_because_cfg_say_so || skip_because_lto {
+ return true;
+ }
+
+ false
+ }),
+ ) {
+ sess.fatal(&format!("failed to build archive from rlib: {}", e));
+ }
+ if archive.build(&dst) {
+ link_upstream(&dst);
+ }
+ });
+ }
+
+ // Same thing as above, but for dynamic crates instead of static crates.
+ fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) {
+        // We just need to tell the linker where the library lives and what its
+        // name is.
+ let parent = cratepath.parent();
+ if let Some(dir) = parent {
+ cmd.include_path(&fix_windows_verbatim_for_gcc(dir));
+ }
+ let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
+ cmd.link_rust_dylib(
+ &unlib(&sess.target, filestem),
+ parent.unwrap_or_else(|| Path::new("")),
+ );
+ }
+}
+
+/// Link in all of our upstream crates' native dependencies. Remember that all of these upstream
+/// native dependencies are non-static. We've got two cases then:
+///
+/// 1. The upstream crate is an rlib. In this case we *must* link in the native dependency because
+/// the rlib is just an archive.
+///
+/// 2. The upstream crate is a dylib. In order to use the dylib, we have to have the dependency
+/// present on the system somewhere. Thus, we don't gain a whole lot from not linking in the
+/// dynamic dependency to this crate as well.
+///
+/// The use case for this is a little subtle. In theory the native dependencies of a crate are
+/// purely an implementation detail of the crate itself, but the problem arises with generic and
+/// inlined functions. If a generic function calls a native function, then the generic function
+/// must be instantiated in the target crate, meaning that the native symbol must also be resolved
+/// in the target crate.
+fn add_upstream_native_libraries(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+) {
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for &cnum in &codegen_results.crate_info.used_crates {
+ for lib in codegen_results.crate_info.native_libraries[&cnum].iter() {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+ if !relevant_lib(sess, &lib) {
+ continue;
+ }
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ let verbatim = lib.verbatim.unwrap_or(false);
+ match lib.kind {
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ // ignore static native libraries here as we've
+ // already included them in add_local_native_libraries and
+ // add_upstream_rust_crates
+ NativeLibKind::Static { .. } => {}
+ NativeLibKind::RawDylib | NativeLibKind::LinkArg => {}
+ }
+ }
+ }
+}
+
+fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
+ match lib.cfg {
+ Some(ref cfg) => rustc_attr::cfg_matches(cfg, &sess.parse_sess, CRATE_NODE_ID, None),
+ None => true,
+ }
+}
+
+fn are_upstream_rust_objects_already_included(sess: &Session) -> bool {
+ match sess.lto() {
+ config::Lto::Fat => true,
+ config::Lto::Thin => {
+ // If we defer LTO to the linker, we haven't run LTO ourselves, so
+ // any upstream object files have not been copied yet.
+ !sess.opts.cg.linker_plugin_lto.enabled()
+ }
+ config::Lto::No | config::Lto::ThinLocal => false,
+ }
+}
+
+fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ let arch = &sess.target.arch;
+ let os = &sess.target.os;
+ let llvm_target = &sess.target.llvm_target;
+ if sess.target.vendor != "apple"
+ || !matches!(os.as_ref(), "ios" | "tvos" | "watchos")
+ || (flavor != LinkerFlavor::Gcc && flavor != LinkerFlavor::Lld(LldFlavor::Ld64))
+ {
+ return;
+ }
+ let sdk_name = match (arch.as_ref(), os.as_ref()) {
+ ("aarch64", "tvos") => "appletvos",
+ ("x86_64", "tvos") => "appletvsimulator",
+ ("arm", "ios") => "iphoneos",
+ ("aarch64", "ios") if llvm_target.contains("macabi") => "macosx",
+ ("aarch64", "ios") if llvm_target.ends_with("-simulator") => "iphonesimulator",
+ ("aarch64", "ios") => "iphoneos",
+ ("x86", "ios") => "iphonesimulator",
+ ("x86_64", "ios") if llvm_target.contains("macabi") => "macosx",
+ ("x86_64", "ios") => "iphonesimulator",
+ ("x86_64", "watchos") => "watchsimulator",
+ ("arm64_32", "watchos") => "watchos",
+ ("aarch64", "watchos") if llvm_target.ends_with("-simulator") => "watchsimulator",
+ ("aarch64", "watchos") => "watchos",
+ ("arm", "watchos") => "watchos",
+ _ => {
+ sess.err(&format!("unsupported arch `{}` for os `{}`", arch, os));
+ return;
+ }
+ };
+ let sdk_root = match get_apple_sdk_root(sdk_name) {
+ Ok(s) => s,
+ Err(e) => {
+ sess.err(&e);
+ return;
+ }
+ };
+
+ match flavor {
+ LinkerFlavor::Gcc => {
+ cmd.args(&["-isysroot", &sdk_root, "-Wl,-syslibroot", &sdk_root]);
+ }
+ LinkerFlavor::Lld(LldFlavor::Ld64) => {
+ cmd.args(&["-syslibroot", &sdk_root]);
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn get_apple_sdk_root(sdk_name: &str) -> Result<String, String> {
+ // Following what clang does
+ // (https://github.com/llvm/llvm-project/blob/
+ // 296a80102a9b72c3eda80558fb78a3ed8849b341/clang/lib/Driver/ToolChains/Darwin.cpp#L1661-L1678)
+ // to allow the SDK path to be set. (For clang, xcrun sets
+ // SDKROOT; for rustc, the user or build system can set it, or we
+ // can fall back to checking for xcrun on PATH.)
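+    //
+    // For example (hypothetical path): an SDKROOT pointing into
+    // `iPhoneOS.platform/.../iPhoneOS.sdk` is accepted for the "iphoneos" SDK,
+    // while a `MacOSX.platform` path is ignored below and resolved via `xcrun`.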
+ if let Ok(sdkroot) = env::var("SDKROOT") {
+ let p = Path::new(&sdkroot);
+ match sdk_name {
+ // Ignore `SDKROOT` if it's clearly set for the wrong platform.
+ "appletvos"
+ if sdkroot.contains("TVSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "appletvsimulator"
+ if sdkroot.contains("TVOS.platform") || sdkroot.contains("MacOSX.platform") => {}
+ "iphoneos"
+ if sdkroot.contains("iPhoneSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "iphonesimulator"
+ if sdkroot.contains("iPhoneOS.platform") || sdkroot.contains("MacOSX.platform") => {
+ }
+ "macosx10.15"
+ if sdkroot.contains("iPhoneOS.platform")
+ || sdkroot.contains("iPhoneSimulator.platform") => {}
+ "watchos"
+ if sdkroot.contains("WatchSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "watchsimulator"
+ if sdkroot.contains("WatchOS.platform") || sdkroot.contains("MacOSX.platform") => {}
+ // Ignore `SDKROOT` if it's not a valid path.
+ _ if !p.is_absolute() || p == Path::new("/") || !p.exists() => {}
+ _ => return Ok(sdkroot),
+ }
+ }
+ let res =
+ Command::new("xcrun").arg("--show-sdk-path").arg("-sdk").arg(sdk_name).output().and_then(
+ |output| {
+ if output.status.success() {
+ Ok(String::from_utf8(output.stdout).unwrap())
+ } else {
+ let error = String::from_utf8(output.stderr);
+                    let error = format!("process exited with error: {}", error.unwrap());
+ Err(io::Error::new(io::ErrorKind::Other, &error[..]))
+ }
+ },
+ );
+
+ match res {
+ Ok(output) => Ok(output.trim().to_string()),
+ Err(e) => Err(format!("failed to get {} SDK path: {}", sdk_name, e)),
+ }
+}
+
+fn add_gcc_ld_path(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(ld_impl) = sess.opts.unstable_opts.gcc_ld {
+ if let LinkerFlavor::Gcc = flavor {
+ match ld_impl {
+ LdImpl::Lld => {
+ let tools_path = sess.get_tools_search_paths(false);
+ let gcc_ld_dir = tools_path
+ .into_iter()
+ .map(|p| p.join("gcc-ld"))
+ .find(|p| {
+ p.join(if sess.host.is_like_windows { "ld.exe" } else { "ld" }).exists()
+ })
+ .unwrap_or_else(|| sess.fatal("rust-lld (as ld) not found"));
+ cmd.arg({
+ let mut arg = OsString::from("-B");
+ arg.push(gcc_ld_dir);
+ arg
+ });
+ cmd.arg(format!("-Wl,-rustc-lld-flavor={}", sess.target.lld_flavor.as_str()));
+ }
+ }
+ } else {
+ sess.fatal("option `-Z gcc-ld` is used even though linker flavor is not gcc");
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
new file mode 100644
index 000000000..ce51b2e95
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -0,0 +1,1788 @@
+use super::archive;
+use super::command::Command;
+use super::symbol_export;
+use rustc_span::symbol::sym;
+
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::path::{Path, PathBuf};
+use std::{env, mem, str};
+
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportKind};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, CrateType, DebugInfo, LinkerPluginLto, Lto, OptLevel, Strip};
+use rustc_session::Session;
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
+
+use cc::windows_registry;
+
+/// Disables non-English messages from localized linkers.
+/// Such messages may cause issues with text encoding on Windows (#35785)
+/// and prevent inspection of linker output in case of errors, which we occasionally need to do.
+/// This should be acceptable because other messages from rustc are in English anyway,
+/// and it may also improve the searchability of the linker diagnostics.
+pub fn disable_localization(linker: &mut Command) {
+ // No harm in setting both env vars simultaneously.
+ // Unix-style linkers.
+ linker.env("LC_ALL", "C");
+ // MSVC's `link.exe`.
+ linker.env("VSLANG", "1033");
+}
+
+// The third parameter is for env vars, used on Windows to set up the
+// path for MSVC to find its DLLs, and for gcc to find its bundled
+// toolchain.
+pub fn get_linker<'a>(
+ sess: &'a Session,
+ linker: &Path,
+ flavor: LinkerFlavor,
+ self_contained: bool,
+ target_cpu: &'a str,
+) -> Box<dyn Linker + 'a> {
+ let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe");
+
+ // If our linker looks like a batch script on Windows then to execute this
+ // we'll need to spawn `cmd` explicitly. This is primarily done to handle
+ // emscripten where the linker is `emcc.bat` and needs to be spawned as
+ // `cmd /c emcc.bat ...`.
+ //
+    // This used to work automatically, but since #42436 it needs to be done
+    // manually (the regression was tagged as #42791); some more info can be
+    // found in #44443 for emscripten itself.
+ let mut cmd = match linker.to_str() {
+ Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker),
+ _ => match flavor {
+ LinkerFlavor::Lld(f) => Command::lld(linker, f),
+ LinkerFlavor::Msvc if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() => {
+ Command::new(msvc_tool.as_ref().map_or(linker, |t| t.path()))
+ }
+ _ => Command::new(linker),
+ },
+ };
+
+ // UWP apps have API restrictions enforced during Store submissions.
+ // To comply with the Windows App Certification Kit,
+ // MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc).
+ let t = &sess.target;
+ if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link))
+ && t.vendor == "uwp"
+ {
+ if let Some(ref tool) = msvc_tool {
+ let original_path = tool.path();
+ if let Some(ref root_lib_path) = original_path.ancestors().nth(4) {
+ let arch = match t.arch.as_ref() {
+ "x86_64" => Some("x64"),
+ "x86" => Some("x86"),
+ "aarch64" => Some("arm64"),
+ "arm" => Some("arm"),
+ _ => None,
+ };
+ if let Some(ref a) = arch {
+ // FIXME: Move this to `fn linker_with_args`.
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(format!("{}\\lib\\{}\\store", root_lib_path.display(), a));
+ cmd.arg(&arg);
+ } else {
+ warn!("arch is not supported");
+ }
+ } else {
+ warn!("MSVC root path lib location not found");
+ }
+ } else {
+ warn!("link.exe not found");
+ }
+ }
+
+ // The compiler's sysroot often has some bundled tools, so add it to the
+ // PATH for the child.
+ let mut new_path = sess.get_tools_search_paths(self_contained);
+ let mut msvc_changed_path = false;
+ if sess.target.is_like_msvc {
+ if let Some(ref tool) = msvc_tool {
+ cmd.args(tool.args());
+ for &(ref k, ref v) in tool.env() {
+ if k == "PATH" {
+ new_path.extend(env::split_paths(v));
+ msvc_changed_path = true;
+ } else {
+ cmd.env(k, v);
+ }
+ }
+ }
+ }
+
+ if !msvc_changed_path {
+ if let Some(path) = env::var_os("PATH") {
+ new_path.extend(env::split_paths(&path));
+ }
+ }
+ cmd.env("PATH", env::join_paths(new_path).unwrap());
+
+ // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction
+ // to the linker args construction.
+ assert!(cmd.get_args().is_empty() || sess.target.vendor == "uwp");
+ match flavor {
+ LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => {
+ Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>
+ }
+ LinkerFlavor::Em => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
+ LinkerFlavor::Gcc => {
+ Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: false })
+ as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Ld)
+ | LinkerFlavor::Lld(LldFlavor::Ld64)
+ | LinkerFlavor::Ld => {
+ Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: true })
+ as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Wasm) => Box::new(WasmLd::new(cmd, sess)) as Box<dyn Linker>,
+
+ LinkerFlavor::PtxLinker => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
+
+ LinkerFlavor::BpfLinker => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>,
+
+ LinkerFlavor::L4Bender => Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>,
+ }
+}
+
+/// Linker abstraction used by `back::link` to build up the command to invoke a
+/// linker.
+///
+/// This trait is the total list of requirements needed by `back::link` and
+/// represents the meaning of each option being passed down. This trait is then
+/// used to dispatch on whether a GNU-like linker (e.g., `ld`) or an
+/// MSVC linker (e.g., `link.exe`) is being used.
+pub trait Linker {
+ fn cmd(&mut self) -> &mut Command;
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path);
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool);
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path);
+ fn link_framework(&mut self, framework: &str, as_needed: bool);
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool);
+ fn link_rlib(&mut self, lib: &Path);
+ fn link_whole_rlib(&mut self, lib: &Path);
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]);
+ fn include_path(&mut self, path: &Path);
+ fn framework_path(&mut self, path: &Path);
+ fn output_filename(&mut self, path: &Path);
+ fn add_object(&mut self, path: &Path);
+ fn gc_sections(&mut self, keep_metadata: bool);
+ fn no_gc_sections(&mut self);
+ fn full_relro(&mut self);
+ fn partial_relro(&mut self);
+ fn no_relro(&mut self);
+ fn optimize(&mut self);
+ fn pgo_gen(&mut self);
+ fn control_flow_guard(&mut self);
+ fn debuginfo(&mut self, strip: Strip, natvis_debugger_visualizers: &[PathBuf]);
+ fn no_crt_objects(&mut self);
+ fn no_default_libraries(&mut self);
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]);
+ fn subsystem(&mut self, subsystem: &str);
+ fn group_start(&mut self);
+ fn group_end(&mut self);
+ fn linker_plugin_lto(&mut self);
+ fn add_eh_frame_header(&mut self) {}
+ fn add_no_exec(&mut self) {}
+ fn add_as_needed(&mut self) {}
+ fn reset_per_library_state(&mut self) {}
+}
+
+impl dyn Linker + '_ {
+ pub fn arg(&mut self, arg: impl AsRef<OsStr>) {
+ self.cmd().arg(arg);
+ }
+
+ pub fn args(&mut self, args: impl IntoIterator<Item: AsRef<OsStr>>) {
+ self.cmd().args(args);
+ }
+
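+ /// Consumes the fully-built command for final execution, leaving an empty
+ /// placeholder command in its place.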
+ pub fn take_cmd(&mut self) -> Command {
+ mem::replace(self.cmd(), Command::new(""))
+ }
+}
+
+pub struct GccLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+ target_cpu: &'a str,
+ hinted_static: bool, // Keeps track of the current hinting mode.
+ // Whether we are driving an `ld`-style linker directly rather than a compiler front-end.
+ is_ld: bool,
+}
+
+impl<'a> GccLinker<'a> {
+ /// Passes an argument directly to the linker.
+ ///
+ /// When the linker is not ld-like, such as when a compiler is used as the linker, the argument
+ /// is prefixed with `-Wl,`.
+ fn linker_arg(&mut self, arg: impl AsRef<OsStr>) -> &mut Self {
+ self.linker_args(&[arg]);
+ self
+ }
+
+ /// Passes a series of arguments directly to the linker.
+ ///
+ /// When the linker is ld-like, the arguments are simply appended to the command. When the
+ /// linker is not ld-like, such as when a compiler is used as the linker, the arguments are
+ /// joined by commas and prefixed with `-Wl` to form a single argument. Passing a single
+ /// argument ensures that the compiler preserves the order of the arguments.
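+ ///
+ /// For example, `linker_args(&["--as-needed", "-z", "now"])` is passed through
+ /// unchanged to an ld-like linker, but becomes the single argument
+ /// `-Wl,--as-needed,-z,now` when a compiler drives the link.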
+ fn linker_args(&mut self, args: &[impl AsRef<OsStr>]) -> &mut Self {
+ if self.is_ld {
+ args.into_iter().for_each(|a| {
+ self.cmd.arg(a);
+ });
+ } else if !args.is_empty() {
+ let mut s = OsString::from("-Wl");
+ for a in args {
+ s.push(",");
+ s.push(a);
+ }
+ self.cmd.arg(s);
+ }
+ self
+ }
+
+ fn takes_hints(&self) -> bool {
+ // Really this function should only return true if the underlying linker
+ // configured for a compiler is binutils' `ld.bfd` or `ld.gold`. We don't
+ // have a foolproof way to detect that, so we rule out some platforms
+ // where this is currently guaranteed *not* to be the case:
+ //
+ // * macOS has its own linker, not binutils'
+ // * For WebAssembly the only functional linker is LLD, which doesn't
+ // support hint flags
+ !self.sess.target.is_like_osx && !self.sess.target.is_like_wasm
+ }
+
+ // Some platforms take hints about whether a library is static or dynamic.
+ // For those that support this, we pass the hint if the library was flagged
+ // "static" (the default is usually dynamic) so that when both libfoo.a and
+ // libfoo.so exist, the right one is chosen.
+ fn hint_static(&mut self) {
+ if !self.takes_hints() {
+ return;
+ }
+ if !self.hinted_static {
+ self.linker_arg("-Bstatic");
+ self.hinted_static = true;
+ }
+ }
+
+ fn hint_dynamic(&mut self) {
+ if !self.takes_hints() {
+ return;
+ }
+ if self.hinted_static {
+ self.linker_arg("-Bdynamic");
+ self.hinted_static = false;
+ }
+ }
+
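+ /// Passes the arguments needed for linker-plugin LTO: the (optional) plugin
+ /// path, the opt level, the target CPU, and any sample-profile path.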
+ fn push_linker_plugin_lto_args(&mut self, plugin_path: Option<&OsStr>) {
+ if let Some(plugin_path) = plugin_path {
+ let mut arg = OsString::from("-plugin=");
+ arg.push(plugin_path);
+ self.linker_arg(&arg);
+ }
+
+ let opt_level = match self.sess.opts.optimize {
+ config::OptLevel::No => "O0",
+ config::OptLevel::Less => "O1",
+ config::OptLevel::Default | config::OptLevel::Size | config::OptLevel::SizeMin => "O2",
+ config::OptLevel::Aggressive => "O3",
+ };
+
+ if let Some(path) = &self.sess.opts.unstable_opts.profile_sample_use {
+ self.linker_arg(&format!("-plugin-opt=sample-profile={}", path.display()));
+ };
+ self.linker_args(&[
+ &format!("-plugin-opt={}", opt_level),
+ &format!("-plugin-opt=mcpu={}", self.target_cpu),
+ ]);
+ }
+
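+ /// Adds the platform-specific flags for producing a dynamic library:
+ /// `-dynamiclib`/`-dylib` plus install-name handling on macOS, and `-shared`
+ /// plus an import library on Windows.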
+ fn build_dylib(&mut self, out_filename: &Path) {
+ // On mac we need to tell the linker to let this library be rpathed
+ if self.sess.target.is_like_osx {
+ if !self.is_ld {
+ self.cmd.arg("-dynamiclib");
+ }
+
+ self.linker_arg("-dylib");
+
+ // Note that the `osx_rpath_install_name` option here is a hack purely to
+ // support rustbuild right now; at some point we should find a more
+ // principled way to force the compiler to pass the right
+ // `-Wl,-install_name` with an `@rpath` in it.
+ if self.sess.opts.cg.rpath || self.sess.opts.unstable_opts.osx_rpath_install_name {
+ let mut rpath = OsString::from("@rpath/");
+ rpath.push(out_filename.file_name().unwrap());
+ self.linker_args(&[OsString::from("-install_name"), rpath]);
+ }
+ } else {
+ self.cmd.arg("-shared");
+ if self.sess.target.is_like_windows {
+ // The output filename already contains `dll_suffix` so
+ // the resulting import library will have a name in the
+ // form of libfoo.dll.a
+ let implib_name =
+ out_filename.file_name().and_then(|file| file.to_str()).map(|file| {
+ format!(
+ "{}{}{}",
+ self.sess.target.staticlib_prefix,
+ file,
+ self.sess.target.staticlib_suffix
+ )
+ });
+ if let Some(implib_name) = implib_name {
+ let implib = out_filename.parent().map(|dir| dir.join(&implib_name));
+ if let Some(implib) = implib {
+ self.linker_arg(&format!("--out-implib={}", (*implib).to_str().unwrap()));
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Linker for GccLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe => {
+ if !self.is_ld && self.sess.target.linker_is_gnu {
+ self.cmd.arg("-no-pie");
+ }
+ }
+ LinkOutputKind::DynamicPicExe => {
+ // A no-op on Windows with gcc and ld, but an error with lld.
+ if !self.sess.target.is_like_windows {
+ // `-pie` works for both gcc wrapper and ld.
+ self.cmd.arg("-pie");
+ }
+ }
+ LinkOutputKind::StaticNoPicExe => {
+ // `-static` works for both gcc wrapper and ld.
+ self.cmd.arg("-static");
+ if !self.is_ld && self.sess.target.linker_is_gnu {
+ self.cmd.arg("-no-pie");
+ }
+ }
+ LinkOutputKind::StaticPicExe => {
+ if !self.is_ld {
+ // Note that combination `-static -pie` doesn't work as expected
+ // for the gcc wrapper, `-static` in that case suppresses `-pie`.
+ self.cmd.arg("-static-pie");
+ } else {
+ // `--no-dynamic-linker` and `-z text` are not strictly necessary for producing
+ // a static pie, but currently passed because gcc and clang pass them.
+ // The former suppresses the `INTERP` ELF header specifying dynamic linker,
+ // which is otherwise implicitly injected by ld (but not lld).
+ // The latter doesn't change anything, only ensures that everything is pic.
+ self.cmd.args(&["-static", "-pie", "--no-dynamic-linker", "-z", "text"]);
+ }
+ }
+ LinkOutputKind::DynamicDylib => self.build_dylib(out_filename),
+ LinkOutputKind::StaticDylib => {
+ self.cmd.arg("-static");
+ self.build_dylib(out_filename);
+ }
+ LinkOutputKind::WasiReactorExe => {
+ self.linker_args(&["--entry", "_initialize"]);
+ }
+ }
+ // The VxWorks compiler driver introduced the `--static-crt` flag specifically
+ // for rustc; it switches linking of libc and similar system libraries to
+ // static without using any `#[link]` attributes in the `libc` crate. See #72782 for details.
+ // FIXME: Switch to using `#[link]` attributes in the `libc` crate
+ // similarly to other targets.
+ if self.sess.target.os == "vxworks"
+ && matches!(
+ output_kind,
+ LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe
+ | LinkOutputKind::StaticDylib
+ )
+ {
+ self.cmd.arg("--static-crt");
+ }
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool) {
+ if self.sess.target.os == "illumos" && lib == "c" {
+ // libc will be added via late_link_args on illumos so that it will
+ // appear last in the library search order.
+ // FIXME: This should be replaced by a more complete and generic
+ // mechanism for controlling the order of library arguments passed
+ // to the linker.
+ return;
+ }
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // FIXME(81490): ld64 doesn't support these flags, and while macOS 11
+ // has `-needed-l{}` / `-needed_library {}`,
+ // we have no way to detect that here.
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--no-as-needed");
+ } else {
+ self.sess.warn("`as-needed` modifier not supported for current linker");
+ }
+ }
+ self.hint_dynamic();
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // See above FIXME comment
+ } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--as-needed");
+ }
+ }
+ }
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
+ self.hint_static();
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ }
+ fn link_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(lib);
+ }
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+ fn framework_path(&mut self, path: &Path) {
+ self.cmd.arg("-F").arg(path);
+ }
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+ fn full_relro(&mut self) {
+ self.linker_args(&["-zrelro", "-znow"]);
+ }
+ fn partial_relro(&mut self) {
+ self.linker_arg("-zrelro");
+ }
+ fn no_relro(&mut self) {
+ self.linker_arg("-znorelro");
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.hint_dynamic();
+ self.cmd.arg(format!("-l{}", lib));
+ }
+
+ fn link_framework(&mut self, framework: &str, as_needed: bool) {
+ self.hint_dynamic();
+ if !as_needed {
+ // FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
+ // flag but we have no way to detect that here.
+ // self.cmd.arg("-needed_framework").arg(framework);
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ }
+ self.cmd.arg("-framework").arg(framework);
+ }
+
+ // Here we explicitly ask that the entire archive be included in the
+ // resulting artifact. For more details see #15460, but the gist is that
+ // the linker will strip away any unused objects in the archive if we
+ // don't otherwise explicitly reference them. This can happen with
+ // libraries that just provide bindings, libraries with generic
+ // functions, etc.
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]) {
+ self.hint_static();
+ let target = &self.sess.target;
+ if !target.is_like_osx {
+ self.linker_arg("--whole-archive").cmd.arg(format!(
+ "-l{}{}",
+ if verbatim { ":" } else { "" },
+ lib
+ ));
+ self.linker_arg("--no-whole-archive");
+ } else {
+ // -force_load is the macOS equivalent of --whole-archive, but it
+ // involves passing the full path to the library to link.
+ self.linker_arg("-force_load");
+ let lib = archive::find_library(lib, verbatim, search_path, &self.sess);
+ self.linker_arg(&lib);
+ }
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ if self.sess.target.is_like_osx {
+ self.linker_arg("-force_load");
+ self.linker_arg(&lib);
+ } else {
+ self.linker_arg("--whole-archive").cmd.arg(lib);
+ self.linker_arg("--no-whole-archive");
+ }
+ }
+
+ fn gc_sections(&mut self, keep_metadata: bool) {
+ // The dead_strip option to the linker specifies that functions and data
+ // unreachable from the entry point will be removed. This is quite useful
+ // with Rust's compilation model of compiling whole libraries at a time
+ // into one object file. For example, this brings hello world from 1.7MB
+ // to 458K.
+ //
+ // Note that this is done for both executables and dynamic libraries. We
+ // won't get much benefit for dylibs because LLVM will have already
+ // stripped away as much as it could. This has not been seen to impact
+ // link times negatively.
+ //
+ // -dead_strip can't be part of the pre_link_args because it's also used
+ // for partial linking when using multiple codegen units (-r). So we
+ // insert it here.
+ if self.sess.target.is_like_osx {
+ self.linker_arg("-dead_strip");
+
+ // If we're building a dylib, we don't use --gc-sections because LLVM
+ // has already done the best it can do, and we also don't want to
+ // eliminate the metadata. If we're building an executable, however,
+ // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
+ // reduction.
+ } else if (self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm)
+ && !keep_metadata
+ {
+ self.linker_arg("--gc-sections");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ if self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm {
+ self.linker_arg("--no-gc-sections");
+ }
+ }
+
+ fn optimize(&mut self) {
+ if !self.sess.target.linker_is_gnu && !self.sess.target.is_like_wasm {
+ return;
+ }
+
+ // GNU-style linkers support optimization with -O. GNU ld doesn't
+ // need a numeric argument, but other linkers do.
+ if self.sess.opts.optimize == config::OptLevel::Default
+ || self.sess.opts.optimize == config::OptLevel::Aggressive
+ {
+ self.linker_arg("-O1");
+ }
+ }
+
+ fn pgo_gen(&mut self) {
+ if !self.sess.target.linker_is_gnu {
+ return;
+ }
+
+ // If we're doing PGO generation stuff and on a GNU-like linker, use the
+ // "-u" flag to properly pull in the profiler runtime bits.
+ //
+ // This is because LLVM otherwise won't add the needed initialization
+ // for us on Linux (though the extra flag should be harmless if it
+ // does).
+ //
+ // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
+ //
+ // Though it may be worth trying to revert those changes upstream, since
+ // the overhead of the initialization should be minor.
+ self.cmd.arg("-u");
+ self.cmd.arg("__llvm_profile_runtime");
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ // The macOS linker doesn't support stripping symbols directly anymore.
+ if self.sess.target.is_like_osx {
+ return;
+ }
+
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.linker_arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.linker_arg("--strip-all");
+ }
+ }
+ }
+
+ fn no_crt_objects(&mut self) {
+ if !self.is_ld {
+ self.cmd.arg("-nostartfiles");
+ }
+ }
+
+ fn no_default_libraries(&mut self) {
+ if !self.is_ld {
+ self.cmd.arg("-nodefaultlibs");
+ }
+ }
+
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]) {
+ // Symbol visibility in object files typically takes care of this.
+ if crate_type == CrateType::Executable {
+ let should_export_executable_symbols =
+ self.sess.opts.unstable_opts.export_executable_symbols;
+ if self.sess.target.override_export_symbols.is_none()
+ && !should_export_executable_symbols
+ {
+ return;
+ }
+ }
+
+ // We manually create a list of exported symbols to ensure we don't expose any more.
+ // The object files have far more public symbols than we actually want to export,
+ // so we hide them all here.
+
+ if !self.sess.target.limit_rdylib_exports {
+ return;
+ }
+
+ // FIXME(#99978) hide #[no_mangle] symbols for proc-macros
+
+ let is_windows = self.sess.target.is_like_windows;
+ let path = tmpdir.join(if is_windows { "list.def" } else { "list" });
+
+ debug!("EXPORTED SYMBOLS:");
+
+ if self.sess.target.is_like_osx {
+ // Write a plain, newline-separated list of symbols
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ for sym in symbols {
+ debug!(" _{}", sym);
+ writeln!(f, "_{}", sym)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ }
+ } else if is_windows {
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+
+ // A .def file similar to the MSVC one, but without the LIBRARY section,
+ // because LD doesn't like it when that section is empty.
+ writeln!(f, "EXPORTS")?;
+ for symbol in symbols {
+ debug!(" _{}", symbol);
+ writeln!(f, " {}", symbol)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write list.def file: {}", e));
+ }
+ } else {
+ // Write an LD version script
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ writeln!(f, "{{")?;
+ if !symbols.is_empty() {
+ writeln!(f, " global:")?;
+ for sym in symbols {
+ debug!(" {};", sym);
+ writeln!(f, " {};", sym)?;
+ }
+ }
+ writeln!(f, "\n local:\n *;\n}};")?;
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write version script: {}", e));
+ }
+ }
+
+ if self.sess.target.is_like_osx {
+ self.linker_args(&[OsString::from("-exported_symbols_list"), path.into()]);
+ } else if self.sess.target.is_like_solaris {
+ self.linker_args(&[OsString::from("-M"), path.into()]);
+ } else if is_windows {
+ self.linker_arg(path);
+ } else {
+ let mut arg = OsString::from("--version-script=");
+ arg.push(path);
+ self.linker_arg(arg);
+ }
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ self.linker_arg("--subsystem");
+ self.linker_arg(&subsystem);
+ }
+
+ fn reset_per_library_state(&mut self) {
+ self.hint_dynamic(); // Reset to default before returning the composed command line.
+ }
+
+ fn group_start(&mut self) {
+ if self.takes_hints() {
+ self.linker_arg("--start-group");
+ }
+ }
+
+ fn group_end(&mut self) {
+ if self.takes_hints() {
+ self.linker_arg("--end-group");
+ }
+ }
+
+ fn linker_plugin_lto(&mut self) {
+ match self.sess.opts.cg.linker_plugin_lto {
+ LinkerPluginLto::Disabled => {
+ // Nothing to do
+ }
+ LinkerPluginLto::LinkerPluginAuto => {
+ self.push_linker_plugin_lto_args(None);
+ }
+ LinkerPluginLto::LinkerPlugin(ref path) => {
+ self.push_linker_plugin_lto_args(Some(path.as_os_str()));
+ }
+ }
+ }
+
+ // Add the `GNU_EH_FRAME` program header which is required to locate unwinding information.
+ // Some versions of `gcc` add it implicitly, some (e.g. `musl-gcc`) don't,
+ // so we just always add it.
+ fn add_eh_frame_header(&mut self) {
+ self.linker_arg("--eh-frame-hdr");
+ }
+
+ fn add_no_exec(&mut self) {
+ if self.sess.target.is_like_windows {
+ self.linker_arg("--nxcompat");
+ } else if self.sess.target.linker_is_gnu {
+ self.linker_arg("-znoexecstack");
+ }
+ }
+
+ fn add_as_needed(&mut self) {
+ if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--as-needed");
+ } else if self.sess.target.is_like_solaris {
+ // -z ignore is the Solaris equivalent to the GNU ld --as-needed option
+ self.linker_args(&["-z", "ignore"]);
+ }
+ }
+}
+
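+/// Linker abstraction for MSVC's `link.exe` (also used for LLD's `lld-link`
+/// flavor).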
+pub struct MsvcLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for MsvcLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe
+ | LinkOutputKind::DynamicPicExe
+ | LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe => {}
+ LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+ self.cmd.arg("/DLL");
+ let mut arg: OsString = "/IMPLIB:".into();
+ arg.push(out_filename.with_extension("dll.lib"));
+ self.cmd.arg(arg);
+ }
+ LinkOutputKind::WasiReactorExe => {
+ panic!("can't link as reactor on non-wasi target");
+ }
+ }
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ // MSVC's ICF (Identical COMDAT Folding) link optimization is
+ // slow for Rust, so we disable it by default in non-optimized
+ // builds.
+ if self.sess.opts.optimize != config::OptLevel::No {
+ self.cmd.arg("/OPT:REF,ICF");
+ } else {
+ // It is necessary to specify NOICF here, because /OPT:REF
+ // implies ICF by default.
+ self.cmd.arg("/OPT:REF,NOICF");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("/OPT:NOREF,NOICF");
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
+ // When producing a dll, the MSVC linker may not actually emit a
+ // `foo.lib` file if the dll doesn't export any symbols, so we
+ // check whether the file is present and simply omit linking to it
+ // if it's not.
+ let name = format!("{}.dll.lib", lib);
+ if path.join(&name).exists() {
+ self.cmd.arg(name);
+ }
+ }
+
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+
+ fn full_relro(&mut self) {
+ // noop
+ }
+
+ fn partial_relro(&mut self) {
+ // noop
+ }
+
+ fn no_relro(&mut self) {
+ // noop
+ }
+
+ fn no_crt_objects(&mut self) {
+ // noop
+ }
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("/NODEFAULTLIB");
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ let mut arg = OsString::from("/OUT:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ bug!("frameworks are not supported on windows")
+ }
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks are not supported on windows")
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
+ self.cmd.arg(format!("/WHOLEARCHIVE:{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+ fn link_whole_rlib(&mut self, path: &Path) {
+ let mut arg = OsString::from("/WHOLEARCHIVE:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ fn optimize(&mut self) {
+ // Needs more investigation of `/OPT` arguments
+ }
+
+ fn pgo_gen(&mut self) {
+ // Nothing needed here.
+ }
+
+ fn control_flow_guard(&mut self) {
+ self.cmd.arg("/guard:cf");
+ }
+
+ fn debuginfo(&mut self, strip: Strip, natvis_debugger_visualizers: &[PathBuf]) {
+ match strip {
+ Strip::None => {
+ // This will cause the Microsoft linker to generate a PDB file
+ // from the CodeView line tables in the object files.
+ self.cmd.arg("/DEBUG");
+
+ // This will cause the Microsoft linker to embed .natvis info into the PDB file
+ let natvis_dir_path = self.sess.sysroot.join("lib\\rustlib\\etc");
+ if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
+ for entry in natvis_dir {
+ match entry {
+ Ok(entry) => {
+ let path = entry.path();
+ if path.extension() == Some("natvis".as_ref()) {
+ let mut arg = OsString::from("/NATVIS:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ }
+ Err(err) => {
+ self.sess
+ .warn(&format!("error enumerating natvis directory: {}", err));
+ }
+ }
+ }
+ }
+
+ // This will cause the Microsoft linker to embed .natvis info for all crates into the PDB file
+ for path in natvis_debugger_visualizers {
+ let mut arg = OsString::from("/NATVIS:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ }
+ Strip::Debuginfo | Strip::Symbols => {
+ self.cmd.arg("/DEBUG:NONE");
+ }
+ }
+ }
+
+ // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
+ // export symbols from a dynamic library. When building a dynamic library,
+ // however, we're going to want some symbols exported, so this function
+ // generates a DEF file which lists all the symbols.
+ //
+ // The linker will read this `*.def` file and export all the symbols from
+ // the dynamic library. Note that this is not as simple as just exporting
+ // all the symbols in the current crate (as specified by `codegen.reachable`)
+ // but rather we also need to possibly export the symbols of upstream
+ // crates. Upstream rlibs may be linked statically to this dynamic library,
+ // in which case they may continue to transitively be used and hence need
+ // their symbols exported.
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]) {
+ // Symbol visibility takes care of this typically
+ if crate_type == CrateType::Executable {
+ let should_export_executable_symbols =
+ self.sess.opts.unstable_opts.export_executable_symbols;
+ if !should_export_executable_symbols {
+ return;
+ }
+ }
+
+ let path = tmpdir.join("lib.def");
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+
+ // Start off with the standard module name header and then go
+ // straight to exports.
+ writeln!(f, "LIBRARY")?;
+ writeln!(f, "EXPORTS")?;
+ for symbol in symbols {
+ debug!(" _{}", symbol);
+ writeln!(f, " {}", symbol)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ }
+ let mut arg = OsString::from("/DEF:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ // Note that previous passes of the compiler validated this subsystem,
+ // so we just blindly pass it to the linker.
+ self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+
+ // Windows has two subsystems we're interested in right now: the console
+ // and windows subsystems. These both implicitly have different entry
+ // points (starting symbols). The console entry point starts with
+ // `mainCRTStartup` and the windows entry point starts with
+ // `WinMainCRTStartup`. These entry points, defined in system libraries,
+ // will then later probe for either `main` or `WinMain`, respectively, to
+ // start the application.
+ //
+ // In Rust we just always generate a `main` function so we want control
+ // to always start there, so we force the entry point on the windows
+ // subsystem to be `mainCRTStartup` to get everything booted up
+ // correctly.
+ //
+ // For more information see RFC #1665
+ if subsystem == "windows" {
+ self.cmd.arg("/ENTRY:mainCRTStartup");
+ }
+ }
+
+ // MSVC doesn't need group indicators
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing
+ }
+
+ fn add_no_exec(&mut self) {
+ self.cmd.arg("/NXCOMPAT");
+ }
+}
+
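+/// Linker abstraction for Emscripten's `emcc` compiler driver.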
+pub struct EmLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for EmLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
+ // Emscripten always links statically
+ self.link_staticlib(lib, verbatim);
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
+ // Whole-archive linking doesn't appear to be supported; fall back to a regular static link.
+ self.link_staticlib(lib, verbatim);
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ // Whole-archive linking doesn't appear to be supported; link the rlib normally.
+ self.link_rlib(lib);
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.link_dylib(lib, false, true);
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.add_object(lib);
+ }
+
+ fn full_relro(&mut self) {
+ // noop
+ }
+
+ fn partial_relro(&mut self) {
+ // noop
+ }
+
+ fn no_relro(&mut self) {
+ // noop
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ bug!("frameworks are not supported on Emscripten")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks are not supported on Emscripten")
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ // noop
+ }
+
+ fn no_gc_sections(&mut self) {
+ // noop
+ }
+
+ fn optimize(&mut self) {
+ // Emscripten performs its own optimizations.
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ OptLevel::Size => "-Os",
+ OptLevel::SizeMin => "-Oz",
+ });
+ }
+
+ fn pgo_gen(&mut self) {
+ // No-op, but maybe we need something like the GNU linker's `-u` handling?
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ // Preserve names or generate source maps depending on debug info
+ self.cmd.arg(match self.sess.opts.debuginfo {
+ DebugInfo::None => "-g0",
+ DebugInfo::Limited => "--profiling-funcs",
+ DebugInfo::Full => "-g",
+ });
+ }
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("-nodefaultlibs");
+ }
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ debug!("EXPORTED SYMBOLS:");
+
+ self.cmd.arg("-s");
+
+ let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
+ let encoded = serde_json::to_string(
+ &symbols.iter().map(|sym| "_".to_owned() + sym).collect::<Vec<_>>(),
+ )
+ .unwrap();
+ debug!("{}", encoded);
+
+ arg.push(encoded);
+
+ self.cmd.arg(arg);
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {
+ // noop
+ }
+
+ // Appears not necessary on Emscripten
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing
+ }
+}
+
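+/// Linker abstraction for LLD's WebAssembly flavor (`wasm-ld`).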
+pub struct WasmLd<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> WasmLd<'a> {
+ fn new(mut cmd: Command, sess: &'a Session) -> WasmLd<'a> {
+ // If the atomics feature is enabled for wasm then we need a whole bunch
+ // of flags:
+ //
+ // * `--shared-memory` - the link won't even succeed without this; it
+ // flags the one linear memory as `shared`
+ //
+ // * `--max-memory=1G` - when specifying a shared memory this must also
+ // be specified. We conservatively choose 1GB but users should be able
+ // to override this with `-C link-arg`.
+ //
+ // * `--import-memory` - it doesn't make much sense for memory to be
+ // exported in a threaded module because typically you're
+ // sharing memory and instantiating the module multiple times. As a
+ // result if it were exported then we'd just have no sharing.
+ //
+ // * `--export=__wasm_init_memory` - when using `--passive-segments` the
+ // linker will synthesize this function, and so we need to make sure
+ // that our usage of `--export` below won't accidentally cause this
+ // function to get deleted.
+ //
+ // * `--export=*tls*` - when `#[thread_local]` symbols are used these
+ // symbols are how the TLS segments are initialized and configured.
+ if sess.target_features.contains(&sym::atomics) {
+ cmd.arg("--shared-memory");
+ cmd.arg("--max-memory=1073741824");
+ cmd.arg("--import-memory");
+ cmd.arg("--export=__wasm_init_memory");
+ cmd.arg("--export=__wasm_init_tls");
+ cmd.arg("--export=__tls_size");
+ cmd.arg("--export=__tls_align");
+ cmd.arg("--export=__tls_base");
+ }
+ WasmLd { cmd, sess }
+ }
+}
+
+impl<'a> Linker for WasmLd<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, _out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe
+ | LinkOutputKind::DynamicPicExe
+ | LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe => {}
+ LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+ self.cmd.arg("--no-entry");
+ }
+ LinkOutputKind::WasiReactorExe => {
+ self.cmd.arg("--entry");
+ self.cmd.arg("_initialize");
+ }
+ }
+ }
+
+ fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ self.cmd.arg("--gc-sections");
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("--no-gc-sections");
+ }
+
+ fn optimize(&mut self) {
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2`
+ // instead.
+ OptLevel::Size => "-O2",
+ OptLevel::SizeMin => "-O2",
+ });
+ }
+
+ fn pgo_gen(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.cmd.arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.cmd.arg("--strip-all");
+ }
+ }
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ for sym in symbols {
+ self.cmd.arg("--export").arg(&sym);
+ }
+
+ // LLD will hide these otherwise-internal symbols since it only exports
+ // symbols explicitly passed via the `--export` flags above and hides all
+ // others. Various bits and pieces of tooling use this, so be sure these
+ // symbols make their way out of the linker as well.
+ self.cmd.arg("--export=__heap_base");
+ self.cmd.arg("--export=__data_end");
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ // Not needed for now with LLD
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing for now
+ }
+}
+
+/// Linker shepherd script for L4Re (Fiasco)
+pub struct L4Bender<'a> {
+ cmd: Command,
+ sess: &'a Session,
+ hinted_static: bool,
+}
+
+impl<'a> Linker for L4Bender<'a> {
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ bug!("dylibs are not supported on L4Re");
+ }
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.hint_static();
+ self.cmd.arg(format!("-PC{}", lib));
+ }
+ fn link_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(lib);
+ }
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+ fn framework_path(&mut self, _: &Path) {
+ bug!("frameworks are not supported on L4Re");
+ }
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn full_relro(&mut self) {
+ self.cmd.arg("-zrelro");
+ self.cmd.arg("-znow");
+ }
+
+ fn partial_relro(&mut self) {
+ self.cmd.arg("-zrelro");
+ }
+
+ fn no_relro(&mut self) {
+ self.cmd.arg("-znorelro");
+ }
+
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rust_dylib(&mut self, _: &str, _: &Path) {
+ panic!("Rust dylibs not supported");
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks not supported on L4Re");
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ self.hint_static();
+ self.cmd.arg("--whole-archive").arg(format!("-l{}", lib));
+ self.cmd.arg("--no-whole-archive");
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive");
+ }
+
+ fn gc_sections(&mut self, keep_metadata: bool) {
+ if !keep_metadata {
+ self.cmd.arg("--gc-sections");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("--no-gc-sections");
+ }
+
+ fn optimize(&mut self) {
+ // GNU-style linkers support optimization with -O. GNU ld doesn't
+ // need a numeric argument, but other linkers do.
+ if self.sess.opts.optimize == config::OptLevel::Default
+ || self.sess.opts.optimize == config::OptLevel::Aggressive
+ {
+ self.cmd.arg("-O1");
+ }
+ }
+
+ fn pgo_gen(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.cmd().arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.cmd().arg("--strip-all");
+ }
+ }
+ }
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("-nostdlib");
+ }
+
+ fn export_symbols(&mut self, _: &Path, _: CrateType, _: &[String]) {
+ // TODO: not implemented yet; adapt the GccLinker implementation.
+ self.sess.warn("exporting symbols not implemented yet for L4Bender");
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ self.cmd.arg(&format!("--subsystem {}", subsystem));
+ }
+
+ fn reset_per_library_state(&mut self) {
+ self.hint_static(); // Reset to default before returning the composed command line.
+ }
+
+ fn group_start(&mut self) {
+ self.cmd.arg("--start-group");
+ }
+
+ fn group_end(&mut self) {
+ self.cmd.arg("--end-group");
+ }
+
+ fn linker_plugin_lto(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+}
+
+impl<'a> L4Bender<'a> {
+ pub fn new(cmd: Command, sess: &'a Session) -> L4Bender<'a> {
+ L4Bender { cmd, sess, hinted_static: false }
+ }
+
+ fn hint_static(&mut self) {
+ if !self.hinted_static {
+ self.cmd.arg("-static");
+ self.hinted_static = true;
+ }
+ }
+}
+
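+/// Invokes `callback` for every symbol exported by the local crate and then
+/// for every symbol exported by each dependency that is linked statically
+/// into the given crate type.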
+fn for_each_exported_symbols_include_dep<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ crate_type: CrateType,
+ mut callback: impl FnMut(ExportedSymbol<'tcx>, SymbolExportInfo, CrateNum),
+) {
+ for &(symbol, info) in tcx.exported_symbols(LOCAL_CRATE).iter() {
+ callback(symbol, info, LOCAL_CRATE);
+ }
+
+ let formats = tcx.dependency_formats(());
+ let deps = formats.iter().find_map(|(t, list)| (*t == crate_type).then_some(list)).unwrap();
+
+ for (index, dep_format) in deps.iter().enumerate() {
+ let cnum = CrateNum::new(index + 1);
+ // For each dependency that we are linking to statically ...
+ if *dep_format == Linkage::Static {
+ for &(symbol, info) in tcx.exported_symbols(cnum).iter() {
+ callback(symbol, info, cnum);
+ }
+ }
+ }
+}
+
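+/// Collects the names of the symbols to export for the given crate type,
+/// honoring a target-specific override list when one is configured.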
+pub(crate) fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec<String> {
+ if let Some(ref exports) = tcx.sess.target.override_export_symbols {
+ return exports.iter().map(ToString::to_string).collect();
+ }
+
+ let mut symbols = Vec::new();
+
+ let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+ for_each_exported_symbols_include_dep(tcx, crate_type, |symbol, info, cnum| {
+ if info.level.is_below_threshold(export_threshold) {
+ symbols.push(symbol_export::symbol_name_for_instance_in_crate(tcx, symbol, cnum));
+ }
+ });
+
+ symbols
+}
+
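+/// Collects the symbols (and their export kinds) that must survive the link
+/// for crate types that invoke the linker; crate types that don't link here
+/// return an empty list.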
+pub(crate) fn linked_symbols(
+ tcx: TyCtxt<'_>,
+ crate_type: CrateType,
+) -> Vec<(String, SymbolExportKind)> {
+ match crate_type {
+ CrateType::Executable | CrateType::Cdylib | CrateType::Dylib => (),
+ CrateType::Staticlib | CrateType::ProcMacro | CrateType::Rlib => {
+ return Vec::new();
+ }
+ }
+
+ let mut symbols = Vec::new();
+
+ let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+ for_each_exported_symbols_include_dep(tcx, crate_type, |symbol, info, cnum| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ symbols.push((
+ symbol_export::linking_symbol_name_for_instance_in_crate(tcx, symbol, cnum),
+ info.kind,
+ ));
+ }
+ });
+
+ symbols
+}
+
+/// A much simplified and explicit CLI for the NVPTX linker. The linker operates
+/// on bitcode and uses the LLVM backend to generate PTX assembly.
+pub struct PtxLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for PtxLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rlib(&mut self, path: &Path) {
+ self.cmd.arg("--rlib").arg(path);
+ }
+
+ fn link_whole_rlib(&mut self, path: &Path) {
+ self.cmd.arg("--rlib").arg(path);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ self.cmd.arg("--debug");
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg("--bitcode").arg(path);
+ }
+
+ fn optimize(&mut self) {
+ match self.sess.lto() {
+ Lto::Thin | Lto::Fat | Lto::ThinLocal => {
+ self.cmd.arg("-Olto");
+ }
+
+ Lto::No => {}
+ };
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
+ panic!("staticlibs not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ panic!("staticlibs not supported")
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {}
+
+ fn no_gc_sections(&mut self) {}
+
+ fn pgo_gen(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, _symbols: &[String]) {}
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ fn group_start(&mut self) {}
+
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {}
+}
+
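+/// Linker abstraction for BPF targets, with a simplified, explicit CLI much
+/// like the NVPTX linker above.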
+pub struct BpfLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for BpfLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rlib(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn link_whole_rlib(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ self.cmd.arg("--debug");
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn optimize(&mut self) {
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ OptLevel::Size => "-Os",
+ OptLevel::SizeMin => "-Oz",
+ });
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
+ panic!("staticlibs not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ panic!("staticlibs not supported")
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {}
+
+ fn no_gc_sections(&mut self) {}
+
+ fn pgo_gen(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn export_symbols(&mut self, tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ let path = tmpdir.join("symbols");
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ for sym in symbols {
+ writeln!(f, "{}", sym)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write symbols file: {}", e));
+ } else {
+ self.cmd.arg("--export-symbols").arg(&path);
+ }
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ fn group_start(&mut self) {}
+
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {}
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
new file mode 100644
index 000000000..cb6244050
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -0,0 +1,104 @@
+use super::write::CodegenContext;
+use crate::traits::*;
+use crate::ModuleCodegen;
+
+use rustc_data_structures::memmap::Mmap;
+use rustc_errors::FatalError;
+
+use std::ffi::CString;
+use std::sync::Arc;
+
+pub struct ThinModule<B: WriteBackendMethods> {
+ pub shared: Arc<ThinShared<B>>,
+ pub idx: usize,
+}
+
+impl<B: WriteBackendMethods> ThinModule<B> {
+ pub fn name(&self) -> &str {
+ self.shared.module_names[self.idx].to_str().unwrap()
+ }
+
+ pub fn cost(&self) -> u64 {
+ // Yes, that's correct: we're using the size of the bytecode as an
+ // indicator of how costly this codegen unit is.
+ self.data().len() as u64
+ }
+
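+ /// Returns the bytecode of this module: indices that fall within
+ /// `thin_buffers` refer to in-memory buffers, and any index past them
+ /// refers to a serialized module.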
+ pub fn data(&self) -> &[u8] {
+ let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data());
+ a.unwrap_or_else(|| {
+ let len = self.shared.thin_buffers.len();
+ self.shared.serialized_modules[self.idx - len].data()
+ })
+ }
+}
+
+pub struct ThinShared<B: WriteBackendMethods> {
+ pub data: B::ThinData,
+ pub thin_buffers: Vec<B::ThinBuffer>,
+ pub serialized_modules: Vec<SerializedModule<B::ModuleBuffer>>,
+ pub module_names: Vec<CString>,
+}
+
+pub enum LtoModuleCodegen<B: WriteBackendMethods> {
+ Fat {
+ module: ModuleCodegen<B::Module>,
+ _serialized_bitcode: Vec<SerializedModule<B::ModuleBuffer>>,
+ },
+
+ Thin(ThinModule<B>),
+}
+
+impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
+ pub fn name(&self) -> &str {
+ match *self {
+ LtoModuleCodegen::Fat { .. } => "everything",
+ LtoModuleCodegen::Thin(ref m) => m.name(),
+ }
+ }
+
+ /// Optimize this module within the given codegen context.
+ ///
+ /// This function is unsafe because it returns a `ModuleCodegen` that still
+ /// points to LLVM data structures owned by this `LtoModuleCodegen`.
+ /// It's intended that the module returned is immediately code generated and
+ /// dropped, and then this LTO module is dropped.
+ pub unsafe fn optimize(
+ self,
+ cgcx: &CodegenContext<B>,
+ ) -> Result<ModuleCodegen<B::Module>, FatalError> {
+ match self {
+ LtoModuleCodegen::Fat { mut module, .. } => {
+ B::optimize_fat(cgcx, &mut module)?;
+ Ok(module)
+ }
+ LtoModuleCodegen::Thin(thin) => B::optimize_thin(cgcx, thin),
+ }
+ }
+
+ /// A "gauge" of how costly it is to optimize this module, used to sort
+ /// biggest modules first.
+ pub fn cost(&self) -> u64 {
+ match *self {
+ // Only one module with fat LTO, so the cost doesn't matter.
+ LtoModuleCodegen::Fat { .. } => 0,
+ LtoModuleCodegen::Thin(ref m) => m.cost(),
+ }
+ }
+}
+
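+/// Bytecode for a module, either held in a backend buffer, copied out of an
+/// rlib, or memory-mapped from an uncompressed file on disk.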
+pub enum SerializedModule<M: ModuleBufferMethods> {
+ Local(M),
+ FromRlib(Vec<u8>),
+ FromUncompressedFile(Mmap),
+}
+
+impl<M: ModuleBufferMethods> SerializedModule<M> {
+ pub fn data(&self) -> &[u8] {
+ match *self {
+ SerializedModule::Local(ref m) => m.data(),
+ SerializedModule::FromRlib(ref m) => m,
+ SerializedModule::FromUncompressedFile(ref m) => m,
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
new file mode 100644
index 000000000..0302c2881
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -0,0 +1,314 @@
+//! Reading of the rustc metadata for rlibs and dylibs
+
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+
+use object::write::{self, StandardSegment, Symbol, SymbolSection};
+use object::{
+ elf, pe, Architecture, BinaryFormat, Endianness, FileFlags, Object, ObjectSection,
+ SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope,
+};
+
+use snap::write::FrameEncoder;
+
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::rustc_erase_owner;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_metadata::fs::METADATA_FILENAME;
+use rustc_metadata::EncodedMetadata;
+use rustc_session::cstore::MetadataLoader;
+use rustc_session::Session;
+use rustc_target::abi::Endian;
+use rustc_target::spec::{RelocModel, Target};
+
+/// The default metadata loader. This is used by cg_llvm and cg_clif.
+///
+/// # Metadata location
+///
+/// <dl>
+/// <dt>rlib</dt>
+/// <dd>The metadata can be found in the `lib.rmeta` file inside of the ar archive.</dd>
+/// <dt>dylib</dt>
+/// <dd>The metadata can be found in the `.rustc` section of the shared library.</dd>
+/// </dl>
+pub struct DefaultMetadataLoader;
+
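+/// Memory-maps the file at `path` and applies `f` to locate the metadata
+/// slice within the mapped bytes.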
+fn load_metadata_with(
+ path: &Path,
+ f: impl for<'a> FnOnce(&'a [u8]) -> Result<&'a [u8], String>,
+) -> Result<MetadataRef, String> {
+ let file =
+ File::open(path).map_err(|e| format!("failed to open file '{}': {}", path.display(), e))?;
+ let data = unsafe { Mmap::map(file) }
+ .map_err(|e| format!("failed to mmap file '{}': {}", path.display(), e))?;
+ let metadata = OwningRef::new(data).try_map(f)?;
+ return Ok(rustc_erase_owner!(metadata.map_owner_box()));
+}
+
+impl MetadataLoader for DefaultMetadataLoader {
+ fn get_rlib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ load_metadata_with(path, |data| {
+ let archive = object::read::archive::ArchiveFile::parse(&*data)
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+
+ for entry_result in archive.members() {
+ let entry = entry_result
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+ if entry.name() == METADATA_FILENAME.as_bytes() {
+ let data = entry
+ .data(data)
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+ return search_for_metadata(path, data, ".rmeta");
+ }
+ }
+
+ Err(format!("metadata not found in rlib '{}'", path.display()))
+ })
+ }
+
+ fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ load_metadata_with(path, |data| search_for_metadata(path, data, ".rustc"))
+ }
+}
+
+fn search_for_metadata<'a>(
+ path: &Path,
+ bytes: &'a [u8],
+ section: &str,
+) -> Result<&'a [u8], String> {
+ let Ok(file) = object::File::parse(bytes) else {
+ // The parse above could fail for odd reasons like corruption, but for
+ // now we just interpret it as this target doesn't support metadata
+ // emission in object files so the entire byte slice itself is probably
+ // a metadata file. Ideally though if necessary we could at least check
+ // the prefix of bytes to see if it's an actual metadata object and if
+ // not forward the error along here.
+ return Ok(bytes);
+ };
+ file.section_by_name(section)
+ .ok_or_else(|| format!("no `{}` section in '{}'", section, path.display()))?
+ .data()
+ .map_err(|e| format!("failed to read {} section in '{}': {}", section, path.display(), e))
+}
+
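+/// Creates an empty object file matching the target's architecture,
+/// endianness, and binary format, or returns `None` for architectures that
+/// the `object` crate can't yet write.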
+pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static>> {
+ let endianness = match sess.target.options.endian {
+ Endian::Little => Endianness::Little,
+ Endian::Big => Endianness::Big,
+ };
+ let architecture = match &sess.target.arch[..] {
+ "arm" => Architecture::Arm,
+ "aarch64" => Architecture::Aarch64,
+ "x86" => Architecture::I386,
+ "s390x" => Architecture::S390x,
+ "mips" => Architecture::Mips,
+ "mips64" => Architecture::Mips64,
+ "x86_64" => {
+ if sess.target.pointer_width == 32 {
+ Architecture::X86_64_X32
+ } else {
+ Architecture::X86_64
+ }
+ }
+ "powerpc" => Architecture::PowerPc,
+ "powerpc64" => Architecture::PowerPc64,
+ "riscv32" => Architecture::Riscv32,
+ "riscv64" => Architecture::Riscv64,
+ "sparc64" => Architecture::Sparc64,
+ // Unsupported architecture.
+ _ => return None,
+ };
+ let binary_format = if sess.target.is_like_osx {
+ BinaryFormat::MachO
+ } else if sess.target.is_like_windows {
+ BinaryFormat::Coff
+ } else {
+ BinaryFormat::Elf
+ };
+
+ let mut file = write::Object::new(binary_format, architecture, endianness);
+ let e_flags = match architecture {
+ Architecture::Mips => {
+ let arch = match sess.target.options.cpu.as_ref() {
+ "mips1" => elf::EF_MIPS_ARCH_1,
+ "mips2" => elf::EF_MIPS_ARCH_2,
+ "mips3" => elf::EF_MIPS_ARCH_3,
+ "mips4" => elf::EF_MIPS_ARCH_4,
+ "mips5" => elf::EF_MIPS_ARCH_5,
+ s if s.contains("r6") => elf::EF_MIPS_ARCH_32R6,
+ _ => elf::EF_MIPS_ARCH_32R2,
+ };
+ // The only ABI LLVM supports for 32-bit MIPS CPUs is o32.
+ let mut e_flags = elf::EF_MIPS_CPIC | elf::EF_MIPS_ABI_O32 | arch;
+ if sess.target.options.relocation_model != RelocModel::Static {
+ e_flags |= elf::EF_MIPS_PIC;
+ }
+ if sess.target.options.cpu.contains("r6") {
+ e_flags |= elf::EF_MIPS_NAN2008;
+ }
+ e_flags
+ }
+ Architecture::Mips64 => {
+ // copied from `mips64el-linux-gnuabi64-gcc foo.c -c`
+ let e_flags = elf::EF_MIPS_CPIC
+ | elf::EF_MIPS_PIC
+ | if sess.target.options.cpu.contains("r6") {
+ elf::EF_MIPS_ARCH_64R6 | elf::EF_MIPS_NAN2008
+ } else {
+ elf::EF_MIPS_ARCH_64R2
+ };
+ e_flags
+ }
+ Architecture::Riscv64 if sess.target.options.features.contains("+d") => {
+ // Copied from `riscv64-linux-gnu-gcc foo.c -c`. Note, though,
+ // that the `+d` target feature indicates whether the
+ // double-precision float ABI is enabled.
+ let e_flags = elf::EF_RISCV_RVC | elf::EF_RISCV_FLOAT_ABI_DOUBLE;
+ e_flags
+ }
+ _ => 0,
+ };
+ // adapted from LLVM's `MCELFObjectTargetWriter::getOSABI`
+ let os_abi = match sess.target.options.os.as_ref() {
+ "hermit" => elf::ELFOSABI_STANDALONE,
+ "freebsd" => elf::ELFOSABI_FREEBSD,
+ "solaris" => elf::ELFOSABI_SOLARIS,
+ _ => elf::ELFOSABI_NONE,
+ };
+ let abi_version = 0;
+ file.flags = FileFlags::Elf { os_abi, abi_version, e_flags };
+ Some(file)
+}
+
+pub enum MetadataPosition {
+ First,
+ Last,
+}
+
+// For rlibs we "pack" rustc metadata into a dummy object file. When rustc
+// creates a dylib crate type it will pass `--whole-archive` (or the
+// platform equivalent) to include all object files from an rlib into the
+// final dylib itself. This causes linkers to iterate and try to include all
+// files located in an archive, so if metadata is stored in an archive then
+// it needs to be of a form that the linker will be able to process.
+//
+// Note, though, that we don't actually want this metadata to show up in any
+// final output of the compiler. Instead this is purely for rustc's own
+// metadata tracking purposes.
+//
+// With the above in mind, each "flavor" of object format gets special
+// handling here depending on the target:
+//
+// * MachO - macos-like targets will insert the metadata into a section that
+// poses as fake dwarf debug info. Inspecting the source of the macos
+// linker shows that such sections are skipped automatically because they
+// aren't in the allowlist of well-known dwarf section names that go into
+// the final artifact.
+//
+// * WebAssembly - we actually don't have any container format for this
+// target. WebAssembly doesn't support the `dylib` crate type anyway, so
+// there's no need for us to support this at this time. Consequently the
+// metadata bytes are simply stored as-is in the rlib.
+//
+// * COFF - Windows-like targets create an object with a section that has
+// the `IMAGE_SCN_LNK_REMOVE` flag set, which ensures that the linker
+// strips the section rather than processing it.
+//
+// * ELF - All other targets are similar to Windows in that there's a
+// `SHF_EXCLUDE` flag we can set on sections so that they are automatically
+// removed from the final output.
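+//
+// Concretely, this means that for (say) an ELF target an rlib ends up
+// containing a small metadata-only object (conventionally named `lib.rmeta`)
+// whose sole payload is a `.rmeta` section flagged `SHF_EXCLUDE`, alongside
+// the regular code-carrying object files; `create_rmeta_file` below builds
+// that object.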
+pub fn create_rmeta_file(sess: &Session, metadata: &[u8]) -> (Vec<u8>, MetadataPosition) {
+ let Some(mut file) = create_object_file(sess) else {
+ // This is used to handle all "other" targets. This includes targets
+ // in two categories:
+ //
+ // * Some targets don't have support in the `object` crate just yet
+ // to write an object file. These targets are likely to get filled
+ // out over time.
+ //
+ // * Targets like WebAssembly don't support dylibs, so the purpose
+ // of putting metadata in object files, to support linking rlibs
+ // into dylibs, is moot.
+ //
+ // In both of these cases rustc will not support linking into dylibs.
+ // That doesn't matter for targets like WebAssembly, but for targets
+ // the `object` crate doesn't support yet it means work is needed in
+ // the `object` crate before a case can be added above.
+ return (metadata.to_vec(), MetadataPosition::Last);
+ };
+ let section = file.add_section(
+ file.segment_name(StandardSegment::Debug).to_vec(),
+ b".rmeta".to_vec(),
+ SectionKind::Debug,
+ );
+ match file.format() {
+ BinaryFormat::Coff => {
+ file.section_mut(section).flags =
+ SectionFlags::Coff { characteristics: pe::IMAGE_SCN_LNK_REMOVE };
+ }
+ BinaryFormat::Elf => {
+ file.section_mut(section).flags =
+ SectionFlags::Elf { sh_flags: elf::SHF_EXCLUDE as u64 };
+ }
+ _ => {}
+ };
+ file.append_section_data(section, metadata, 1);
+ (file.write().unwrap(), MetadataPosition::First)
+}
+
+// Historical note:
+//
+// When using link.exe we observed that the section name `.note.rustc` was
+// being shortened to `.note.ru`, and according to the PE and COFF
+// specification:
+//
+// > Executable images do not use a string table and do not support
+// > section names longer than 8 characters
+//
+// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+//
+// As a result, we choose a slightly shorter name! As to why
+// `.note.rustc` works on MinGW, see
+// https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
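+//
+// (`.rustc` is six characters, comfortably within the eight-character limit,
+// while `.note.rustc` is eleven.)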
+pub fn create_compressed_metadata_file(
+ sess: &Session,
+ metadata: &EncodedMetadata,
+ symbol_name: &str,
+) -> Vec<u8> {
+ let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+ FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
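+ // `FrameEncoder` here is assumed to be the snappy framing encoder from
+ // the `snap` crate, so at this point `compressed` holds the rustc
+ // metadata header followed by a snappy-compressed payload.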
+ let Some(mut file) = create_object_file(sess) else {
+ return compressed.to_vec();
+ };
+ let section = file.add_section(
+ file.segment_name(StandardSegment::Data).to_vec(),
+ b".rustc".to_vec(),
+ SectionKind::ReadOnlyData,
+ );
+ match file.format() {
+ BinaryFormat::Elf => {
+ // Explicitly set no flags to avoid the SHF_ALLOC default for data sections.
+ file.section_mut(section).flags = SectionFlags::Elf { sh_flags: 0 };
+ }
+ _ => {}
+ };
+ let offset = file.append_section_data(section, &compressed, 1);
+
+ // For MachO and probably PE this is necessary to prevent the linker from
+ // throwing away the .rustc section. For ELF this isn't necessary, but it
+ // doesn't hurt either.
+ file.add_symbol(Symbol {
+ name: symbol_name.as_bytes().to_vec(),
+ value: offset,
+ size: compressed.len() as u64,
+ kind: SymbolKind::Data,
+ scope: SymbolScope::Dynamic,
+ weak: false,
+ section: SymbolSection::Section(section),
+ flags: SymbolFlags::None,
+ });
+
+ file.write().unwrap()
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/mod.rs b/compiler/rustc_codegen_ssa/src/back/mod.rs
new file mode 100644
index 000000000..d11ed54eb
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/mod.rs
@@ -0,0 +1,9 @@
+pub mod archive;
+pub mod command;
+pub mod link;
+pub mod linker;
+pub mod lto;
+pub mod metadata;
+pub mod rpath;
+pub mod symbol_export;
+pub mod write;
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
new file mode 100644
index 000000000..0b5656c9a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -0,0 +1,114 @@
+use pathdiff::diff_paths;
+use rustc_data_structures::fx::FxHashSet;
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+
+pub struct RPathConfig<'a> {
+ pub libs: &'a [&'a Path],
+ pub out_filename: PathBuf,
+ pub is_like_osx: bool,
+ pub has_rpath: bool,
+ pub linker_is_gnu: bool,
+}
+
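+// For example, with an `out_filename` of `bin/rustc` and a dependency at
+// `lib/libstd.so`, a non-macOS target ends up with the flag
+// `-Wl,-rpath,$ORIGIN/../lib` (macOS uses `@loader_path` instead); see the
+// tests in `rpath/tests.rs`.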
+pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
+ // No rpath on Windows.
+ if !config.has_rpath {
+ return Vec::new();
+ }
+
+ debug!("preparing the RPATH!");
+
+ let rpaths = get_rpaths(config);
+ let mut flags = rpaths_to_flags(&rpaths);
+
+ if config.linker_is_gnu {
+ // Use DT_RUNPATH instead of DT_RPATH if available
+ flags.push("-Wl,--enable-new-dtags".to_owned());
+
+ // Set DF_ORIGIN so that the dynamic linker substitutes $ORIGIN
+ flags.push("-Wl,-z,origin".to_owned());
+ }
+
+ flags
+}
+
+fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
+ let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
+
+ for rpath in rpaths {
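+ // `-Wl` splits its argument on commas, so an rpath that itself contains
+ // a comma must be passed via `-Xlinker` instead.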
+ if rpath.contains(',') {
+ ret.push("-Wl,-rpath".into());
+ ret.push("-Xlinker".into());
+ ret.push(rpath.clone());
+ } else {
+ ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
+ }
+ }
+
+ ret
+}
+
+fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
+ debug!("output: {:?}", config.out_filename.display());
+ debug!("libs:");
+ for libpath in config.libs {
+ debug!(" {:?}", libpath.display());
+ }
+
+ // Use relative paths to the libraries. Binaries can be moved
+ // as long as they maintain the relative relationship to the
+ // crates they depend on.
+ let rpaths = get_rpaths_relative_to_output(config);
+
+ debug!("rpaths:");
+ for rpath in &rpaths {
+ debug!(" {}", rpath);
+ }
+
+ // Remove duplicates
+ minimize_rpaths(&rpaths)
+}
+
+fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<String> {
+ config.libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
+}
+
+fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String {
+ // Mac doesn't appear to support $ORIGIN
+ let prefix = if config.is_like_osx { "@loader_path" } else { "$ORIGIN" };
+
+ let cwd = env::current_dir().unwrap();
+ let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or_else(|_| cwd.join(lib));
+ lib.pop(); // strip filename
+ let mut output = cwd.join(&config.out_filename);
+ output.pop(); // strip filename
+ let output = fs::canonicalize(&output).unwrap_or(output);
+ let relative = path_relative_from(&lib, &output)
+ .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib));
+ // FIXME (#9639): This needs to handle non-utf8 paths
+ format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path"))
+}
+
+// This routine is adapted from the *old* Path's `path_relative_from`
+// function, which works differently from the new `relative_from` function.
+// In particular, this handles the case on unix where both paths are
+// absolute but with only the root as the common directory.
+fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
+ diff_paths(path, base)
+}
+
+fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
+ let mut set = FxHashSet::default();
+ let mut minimized = Vec::new();
+ for rpath in rpaths {
+ if set.insert(rpath) {
+ minimized.push(rpath.clone());
+ }
+ }
+ minimized
+}
+
+#[cfg(all(unix, test))]
+mod tests;
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
new file mode 100644
index 000000000..604f19144
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
@@ -0,0 +1,72 @@
+use super::RPathConfig;
+use super::{get_rpath_relative_to_output, minimize_rpaths, rpaths_to_flags};
+use std::path::{Path, PathBuf};
+
+#[test]
+fn test_rpaths_to_flags() {
+ let flags = rpaths_to_flags(&["path1".to_string(), "path2".to_string()]);
+ assert_eq!(flags, ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]);
+}
+
+#[test]
+fn test_minimize1() {
+ let res = minimize_rpaths(&["rpath1".to_string(), "rpath2".to_string(), "rpath1".to_string()]);
+ assert!(res == ["rpath1", "rpath2",]);
+}
+
+#[test]
+fn test_minimize2() {
+ let res = minimize_rpaths(&[
+ "1a".to_string(),
+ "2".to_string(),
+ "2".to_string(),
+ "1a".to_string(),
+ "4a".to_string(),
+ "1a".to_string(),
+ "2".to_string(),
+ "3".to_string(),
+ "4a".to_string(),
+ "3".to_string(),
+ ]);
+ assert!(res == ["1a", "2", "4a", "3",]);
+}
+
+#[test]
+fn test_rpath_relative() {
+ if cfg!(target_os = "macos") {
+ let config = &mut RPathConfig {
+ libs: &[],
+ has_rpath: true,
+ is_like_osx: true,
+ linker_is_gnu: false,
+ out_filename: PathBuf::from("bin/rustc"),
+ };
+ let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+ assert_eq!(res, "@loader_path/../lib");
+ } else {
+ let config = &mut RPathConfig {
+ libs: &[],
+ out_filename: PathBuf::from("bin/rustc"),
+ has_rpath: true,
+ is_like_osx: false,
+ linker_is_gnu: true,
+ };
+ let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+ assert_eq!(res, "$ORIGIN/../lib");
+ }
+}
+
+#[test]
+fn test_xlinker() {
+ let args = rpaths_to_flags(&["a/normal/path".to_string(), "a,comma,path".to_string()]);
+
+ assert_eq!(
+ args,
+ vec![
+ "-Wl,-rpath,a/normal/path".to_string(),
+ "-Wl,-rpath".to_string(),
+ "-Xlinker".to_string(),
+ "a,comma,path".to_string()
+ ]
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
new file mode 100644
index 000000000..e6b605575
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -0,0 +1,590 @@
+use std::collections::hash_map::Entry::*;
+
+use rustc_ast::expand::allocator::ALLOCATOR_METHODS;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::Node;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::exported_symbols::{
+ metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
+};
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::{self, SymbolName, TyCtxt};
+use rustc_session::config::CrateType;
+use rustc_target::spec::SanitizerSet;
+
+pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
+ crates_export_threshold(&tcx.sess.crate_types())
+}
+
+fn crate_export_threshold(crate_type: CrateType) -> SymbolExportLevel {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::ProcMacro | CrateType::Cdylib => {
+ SymbolExportLevel::C
+ }
+ CrateType::Rlib | CrateType::Dylib => SymbolExportLevel::Rust,
+ }
+}
+
+pub fn crates_export_threshold(crate_types: &[CrateType]) -> SymbolExportLevel {
+ if crate_types
+ .iter()
+ .any(|&crate_type| crate_export_threshold(crate_type) == SymbolExportLevel::Rust)
+ {
+ SymbolExportLevel::Rust
+ } else {
+ SymbolExportLevel::C
+ }
+}
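+
+// For example, building with `--crate-type cdylib` alone yields a threshold
+// of `SymbolExportLevel::C`, while `--crate-type cdylib,rlib` yields
+// `SymbolExportLevel::Rust`, since the rlib may be linked into downstream
+// Rust crates that need Rust-level symbols.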
+
+fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<SymbolExportInfo> {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return Default::default();
+ }
+
+ // Check to see if this crate is a "special runtime crate". These
+ // crates, implementation details of the standard library, typically
+ // have a bunch of `pub extern` and `#[no_mangle]` functions as the
+ // ABI between them. We don't want their symbols to have a `C`
+ // export level, however, as they're just implementation details.
+ // Down below we'll hardwire all of the symbols to the `Rust` export
+ // level instead.
+ let special_runtime_crate =
+ tcx.is_panic_runtime(LOCAL_CRATE) || tcx.is_compiler_builtins(LOCAL_CRATE);
+
+ let mut reachable_non_generics: DefIdMap<_> = tcx
+ .reachable_set(())
+ .iter()
+ .filter_map(|&def_id| {
+ // We want to ignore some FFI functions that are not exposed from
+ // this crate. Reachable FFI functions can be lumped into two
+ // categories:
+ //
+ // 1. Those that are included statically via a static library
+ // 2. Those included otherwise (e.g., dynamically or via a framework)
+ //
+ // Although our LLVM module is not literally emitting code for the
+ // statically included symbols, it's an export of our library which
+ // needs to be passed on to the linker and encoded in the metadata.
+ //
+ // As a result, if this id is an FFI item (foreign item) then we only
+ // let it through if it's included statically.
+ match tcx.hir().get_by_def_id(def_id) {
+ Node::ForeignItem(..) => {
+ tcx.is_statically_included_foreign_item(def_id).then_some(def_id)
+ }
+
+ // Only consider nodes that actually have exported symbols.
+ Node::Item(&hir::Item {
+ kind: hir::ItemKind::Static(..) | hir::ItemKind::Fn(..),
+ ..
+ })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
+ let generics = tcx.generics_of(def_id);
+ if !generics.requires_monomorphization(tcx)
+ // Functions marked with #[inline] are codegened with "internal"
+ // linkage and are not exported unless marked with an extern
+ // indicator
+ && (!Instance::mono(tcx, def_id.to_def_id()).def.generates_cgu_internal_copy(tcx)
+ || tcx.codegen_fn_attrs(def_id.to_def_id()).contains_extern_indicator())
+ {
+ Some(def_id)
+ } else {
+ None
+ }
+ }
+
+ _ => None,
+ }
+ })
+ .map(|def_id| {
+ let (export_level, used) = if special_runtime_crate {
+ let name = tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())).name;
+ // We won't link right if these symbols are stripped during LTO.
+ let used = match name {
+ "rust_eh_personality"
+ | "rust_eh_register_frames"
+ | "rust_eh_unregister_frames" => true,
+ _ => false,
+ };
+ (SymbolExportLevel::Rust, used)
+ } else {
+ (symbol_export_level(tcx, def_id.to_def_id()), false)
+ };
+ let codegen_attrs = tcx.codegen_fn_attrs(def_id.to_def_id());
+ debug!(
+ "EXPORTED SYMBOL (local): {} ({:?})",
+ tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())),
+ export_level
+ );
+ (def_id.to_def_id(), SymbolExportInfo {
+ level: export_level,
+ kind: if tcx.is_static(def_id.to_def_id()) {
+ if codegen_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ SymbolExportKind::Tls
+ } else {
+ SymbolExportKind::Data
+ }
+ } else {
+ SymbolExportKind::Text
+ },
+ used: codegen_attrs.flags.contains(CodegenFnAttrFlags::USED)
+ || codegen_attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) || used,
+ })
+ })
+ .collect();
+
+ if let Some(id) = tcx.proc_macro_decls_static(()) {
+ reachable_non_generics.insert(
+ id.to_def_id(),
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ );
+ }
+
+ reachable_non_generics
+}
+
+fn is_reachable_non_generic_provider_local(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let export_threshold = threshold(tcx);
+
+ if let Some(&info) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+ info.level.is_below_threshold(export_threshold)
+ } else {
+ false
+ }
+}
+
+fn is_reachable_non_generic_provider_extern(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
+}
+
+fn exported_symbols_provider_local<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cnum: CrateNum,
+) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return &[];
+ }
+
+ let mut symbols: Vec<_> = tcx
+ .reachable_non_generics(LOCAL_CRATE)
+ .iter()
+ .map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info))
+ .collect();
+
+ if tcx.entry_fn(()).is_some() {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, "main"));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+
+ if tcx.allocator_kind(()).is_some() {
+ for method in ALLOCATOR_METHODS {
+ let symbol_name = format!("__rust_{}", method.name);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ }
+
+ if tcx.sess.instrument_coverage() || tcx.sess.opts.cg.profile_generate.enabled() {
+ // These are weak symbols that point to the profile version and the
+ // profile name, which need to be treated as exported so LTO doesn't nix
+ // them.
+ const PROFILER_WEAK_SYMBOLS: [&str; 2] =
+ ["__llvm_profile_raw_version", "__llvm_profile_filename"];
+
+ symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+ (
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ )
+ }));
+ }
+
+ if tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+ let mut msan_weak_symbols = Vec::new();
+
+ // Similar to profiling, preserve weak MSan symbols during LTO.
+ if tcx.sess.opts.unstable_opts.sanitizer_recover.contains(SanitizerSet::MEMORY) {
+ msan_weak_symbols.push("__msan_keep_going");
+ }
+
+ if tcx.sess.opts.unstable_opts.sanitizer_memory_track_origins != 0 {
+ msan_weak_symbols.push("__msan_track_origins");
+ }
+
+ symbols.extend(msan_weak_symbols.into_iter().map(|sym| {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+ (
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ )
+ }));
+ }
+
+ if tcx.sess.crate_types().contains(&CrateType::Dylib)
+ || tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ {
+ let symbol_name = metadata_symbol_name(tcx);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: true,
+ },
+ ));
+ }
+
+ if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() {
+ use rustc_middle::mir::mono::{Linkage, MonoItem, Visibility};
+ use rustc_middle::ty::InstanceDef;
+
+ // Normally, we require that shared monomorphizations are not hidden,
+ // because if we want to re-use a monomorphization from a Rust dylib, it
+ // needs to be exported.
+ // However, on platforms that don't allow for Rust dylibs, having
+ // external linkage is enough for a monomorphization to be linked against.
+ let need_visibility = tcx.sess.target.dynamic_linking && !tcx.sess.target.only_cdylib;
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
+
+ for (mono_item, &(linkage, visibility)) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
+ if linkage != Linkage::External {
+ // We can only re-use things with external linkage, otherwise
+ // we'll get a linker error
+ continue;
+ }
+
+ if need_visibility && visibility == Visibility::Hidden {
+ // If we potentially share things from Rust dylibs, they must
+ // not be hidden
+ continue;
+ }
+
+ match *mono_item {
+ MonoItem::Fn(Instance { def: InstanceDef::Item(def), substs }) => {
+ if substs.non_erasable_generics().next().is_some() {
+ let symbol = ExportedSymbol::Generic(def.did, substs);
+ symbols.push((
+ symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ }
+ MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), substs }) => {
+ // A little sanity-check
+ debug_assert_eq!(
+ substs.non_erasable_generics().next(),
+ Some(GenericArgKind::Type(ty))
+ );
+ symbols.push((
+ ExportedSymbol::DropGlue(ty),
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ _ => {
+ // Any other symbols don't qualify for sharing
+ }
+ }
+ }
+ }
+
+ // Sort so we get a stable incr. comp. hash.
+ symbols.sort_by_cached_key(|s| s.0.symbol_name_for_local_instance(tcx));
+
+ tcx.arena.alloc_from_iter(symbols)
+}
+
+fn upstream_monomorphizations_provider(
+ tcx: TyCtxt<'_>,
+ (): (),
+) -> DefIdMap<FxHashMap<SubstsRef<'_>, CrateNum>> {
+ let cnums = tcx.crates(());
+
+ let mut instances: DefIdMap<FxHashMap<_, _>> = Default::default();
+
+ let drop_in_place_fn_def_id = tcx.lang_items().drop_in_place_fn();
+
+ for &cnum in cnums.iter() {
+ for (exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
+ let (def_id, substs) = match *exported_symbol {
+ ExportedSymbol::Generic(def_id, substs) => (def_id, substs),
+ ExportedSymbol::DropGlue(ty) => {
+ if let Some(drop_in_place_fn_def_id) = drop_in_place_fn_def_id {
+ (drop_in_place_fn_def_id, tcx.intern_substs(&[ty.into()]))
+ } else {
+ // `drop_in_place` does not exist; don't try to use it.
+ continue;
+ }
+ }
+ ExportedSymbol::NonGeneric(..) | ExportedSymbol::NoDefId(..) => {
+ // These are not monomorphizations
+ continue;
+ }
+ };
+
+ let substs_map = instances.entry(def_id).or_default();
+
+ match substs_map.entry(substs) {
+ Occupied(mut e) => {
+ // If there are multiple monomorphizations available, we select
+ // one deterministically: the crate with the smallest stable
+ // crate id wins.
+ let other_cnum = *e.get();
+ if tcx.stable_crate_id(other_cnum) > tcx.stable_crate_id(cnum) {
+ e.insert(cnum);
+ }
+ }
+ Vacant(e) => {
+ e.insert(cnum);
+ }
+ }
+ }
+ }
+
+ instances
+}
+
+fn upstream_monomorphizations_for_provider(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Option<&FxHashMap<SubstsRef<'_>, CrateNum>> {
+ debug_assert!(!def_id.is_local());
+ tcx.upstream_monomorphizations(()).get(&def_id)
+}
+
+fn upstream_drop_glue_for_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+) -> Option<CrateNum> {
+ if let Some(def_id) = tcx.lang_items().drop_in_place_fn() {
+ tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&substs).cloned())
+ } else {
+ None
+ }
+}
+
+fn is_unreachable_local_definition_provider(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ !tcx.reachable_set(()).contains(&def_id)
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.reachable_non_generics = reachable_non_generics_provider;
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
+ providers.exported_symbols = exported_symbols_provider_local;
+ providers.upstream_monomorphizations = upstream_monomorphizations_provider;
+ providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
+ providers.upstream_drop_glue_for = upstream_drop_glue_for_provider;
+ providers.wasm_import_module_map = wasm_import_module_map;
+}
+
+pub fn provide_extern(providers: &mut ExternProviders) {
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+ providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+}
+
+fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel {
+ // We export anything that's not mangled at the "C" layer, as it probably
+ // has to do with ABI concerns. We do not, however, apply such treatment to
+ // special symbols in the standard library that exist for various plumbing
+ // between core/std/allocators/etc. For example, symbols used to hook up
+ // allocation are not considered for export.
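+ // For example, an ordinary `#[no_mangle] pub extern "C" fn` is exported at
+ // the `C` level, whereas a function marked with the internal
+ // `#[rustc_std_internal_symbol]` attribute stays at the `Rust` level
+ // despite being unmangled.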
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
+ let is_extern = codegen_fn_attrs.contains_extern_indicator();
+ let std_internal =
+ codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+
+ if is_extern && !std_internal {
+ let target = &tcx.sess.target.llvm_target;
+ // WebAssembly cannot export data symbols, so reduce their export level
+ if target.contains("emscripten") {
+ if let Some(Node::Item(&hir::Item { kind: hir::ItemKind::Static(..), .. })) =
+ tcx.hir().get_if_local(sym_def_id)
+ {
+ return SymbolExportLevel::Rust;
+ }
+ }
+
+ SymbolExportLevel::C
+ } else {
+ SymbolExportLevel::Rust
+ }
+}
+
+/// This is the symbol name of the given instance instantiated in a specific crate.
+pub fn symbol_name_for_instance_in_crate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ symbol: ExportedSymbol<'tcx>,
+ instantiating_crate: CrateNum,
+) -> String {
+ // If this is something instantiated in the local crate then we might
+ // already have cached the name as a query result.
+ if instantiating_crate == LOCAL_CRATE {
+ return symbol.symbol_name_for_local_instance(tcx).to_string();
+ }
+
+ // This is something instantiated in an upstream crate, so we have to use
+ // the slower (because uncached) version of computing the symbol name.
+ match symbol {
+ ExportedSymbol::NonGeneric(def_id) => {
+ rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::mono(tcx, def_id),
+ instantiating_crate,
+ )
+ }
+ ExportedSymbol::Generic(def_id, substs) => {
+ rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::new(def_id, substs),
+ instantiating_crate,
+ )
+ }
+ ExportedSymbol::DropGlue(ty) => rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::resolve_drop_in_place(tcx, ty),
+ instantiating_crate,
+ ),
+ ExportedSymbol::NoDefId(symbol_name) => symbol_name.to_string(),
+ }
+}
+
+/// This is the symbol name of the given instance as seen by the linker.
+///
+/// On 32-bit Windows symbols are decorated according to their calling conventions.
+pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ symbol: ExportedSymbol<'tcx>,
+ instantiating_crate: CrateNum,
+) -> String {
+ use rustc_target::abi::call::Conv;
+
+ let mut undecorated = symbol_name_for_instance_in_crate(tcx, symbol, instantiating_crate);
+
+ let target = &tcx.sess.target;
+ if !target.is_like_windows {
+ // Mach-O has a global "_" prefix and the `object` crate will handle it.
+ // ELF does not have any symbol decorations.
+ return undecorated;
+ }
+
+ let x86 = match &target.arch[..] {
+ "x86" => true,
+ "x86_64" => false,
+ // Only x86/64 use symbol decorations.
+ _ => return undecorated,
+ };
+
+ let instance = match symbol {
+ ExportedSymbol::NonGeneric(def_id) | ExportedSymbol::Generic(def_id, _)
+ if tcx.is_static(def_id) =>
+ {
+ None
+ }
+ ExportedSymbol::NonGeneric(def_id) => Some(Instance::mono(tcx, def_id)),
+ ExportedSymbol::Generic(def_id, substs) => Some(Instance::new(def_id, substs)),
+ // DropGlue always uses the Rust calling convention and thus follows the
+ // target's default symbol decoration scheme.
+ ExportedSymbol::DropGlue(..) => None,
+ // NoDefId always follows the target's default symbol decoration scheme.
+ ExportedSymbol::NoDefId(..) => None,
+ };
+
+ let (conv, args) = instance
+ .map(|i| {
+ tcx.fn_abi_of_instance(ty::ParamEnv::reveal_all().and((i, ty::List::empty())))
+ .unwrap_or_else(|_| bug!("fn_abi_of_instance({i:?}) failed"))
+ })
+ .map(|fnabi| (fnabi.conv, &fnabi.args[..]))
+ .unwrap_or((Conv::Rust, &[]));
+
+ // Decorate symbols with prefixes, suffixes, and the total number of bytes of arguments.
+ // Reference: https://docs.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170
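+ // For example, on 32-bit Windows an `extern "stdcall"` function taking two
+ // `i32` arguments is decorated as `_name@8`, while the same signature with
+ // `extern "fastcall"` becomes `@name@8`.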
+ let (prefix, suffix) = match conv {
+ Conv::X86Fastcall => ("@", "@"),
+ Conv::X86Stdcall => ("_", "@"),
+ Conv::X86VectorCall => ("", "@@"),
+ _ => {
+ if x86 {
+ undecorated.insert(0, '_');
+ }
+ return undecorated;
+ }
+ };
+
+ let args_in_bytes: u64 = args
+ .iter()
+ .map(|abi| abi.layout.size.bytes().next_multiple_of(target.pointer_width as u64 / 8))
+ .sum();
+ format!("{prefix}{undecorated}{suffix}{args_in_bytes}")
+}
+
+fn wasm_import_module_map(tcx: TyCtxt<'_>, cnum: CrateNum) -> FxHashMap<DefId, String> {
+ // Build up a map from DefId to a `NativeLib` structure, where
+ // `NativeLib` internally contains information about
+ // `#[link(wasm_import_module = "...")]` for example.
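+ // For example, every foreign item declared in an extern block annotated
+ // with `#[link(wasm_import_module = "host")]` maps to the module string
+ // "host".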
+ let native_libs = tcx.native_libraries(cnum);
+
+ let def_id_to_native_lib = native_libs
+ .iter()
+ .filter_map(|lib| lib.foreign_module.map(|id| (id, lib)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut ret = FxHashMap::default();
+ for (def_id, lib) in tcx.foreign_modules(cnum).iter() {
+ let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module);
+ let Some(module) = module else { continue };
+ ret.extend(lib.foreign_items.iter().map(|id| {
+ assert_eq!(id.krate, cnum);
+ (*id, module.to_string())
+ }));
+ }
+
+ ret
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
new file mode 100644
index 000000000..1b5ad8710
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -0,0 +1,2015 @@
+use super::link::{self, ensure_removed};
+use super::lto::{self, SerializedModule};
+use super::symbol_export::symbol_name_for_instance_in_crate;
+
+use crate::{
+ CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
+};
+
+use crate::traits::*;
+use jobserver::{Acquired, Client};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::profiling::TimingGuard;
+use rustc_data_structures::profiling::VerboseTimingGuard;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::emitter::Emitter;
+use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
+use rustc_fs_util::link_or_copy;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_incremental::{
+ copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
+};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::exported_symbols::SymbolExportInfo;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cgu_reuse_tracker::CguReuseTracker;
+use rustc_session::config::{self, CrateType, Lto, OutputFilenames, OutputType};
+use rustc_session::config::{Passes, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span};
+use rustc_target::spec::{MergeFunctions, SanitizerSet};
+
+use std::any::Any;
+use std::fs;
+use std::io;
+use std::marker::PhantomData;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::mpsc::{channel, Receiver, Sender};
+use std::sync::Arc;
+use std::thread;
+
+const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
+
+/// What kind of object file to emit.
+#[derive(Clone, Copy, PartialEq)]
+pub enum EmitObj {
+ // No object file.
+ None,
+
+ // Just uncompressed LLVM bitcode. Provides easy compatibility with
+ // emscripten's emcc compiler, when used as the linker.
+ Bitcode,
+
+ // Object code, possibly augmented with a bitcode section.
+ ObjectCode(BitcodeSection),
+}
+
+/// What kind of LLVM bitcode section to embed in an object file.
+#[derive(Clone, Copy, PartialEq)]
+pub enum BitcodeSection {
+ // No bitcode section.
+ None,
+
+ // A full, uncompressed bitcode section.
+ Full,
+}
+
+/// Module-specific configuration for `optimize_and_codegen`.
+pub struct ModuleConfig {
+ /// Names of additional optimization passes to run.
+ pub passes: Vec<String>,
+ /// Some(level) to optimize at a certain level, or None to run
+ /// absolutely no optimizations (used for the metadata module).
+ pub opt_level: Option<config::OptLevel>,
+
+ /// Some(level) to optimize binary size, or None to not affect program size.
+ pub opt_size: Option<config::OptLevel>,
+
+ pub pgo_gen: SwitchWithOptPath,
+ pub pgo_use: Option<PathBuf>,
+ pub pgo_sample_use: Option<PathBuf>,
+ pub debug_info_for_profiling: bool,
+ pub instrument_coverage: bool,
+ pub instrument_gcov: bool,
+
+ pub sanitizer: SanitizerSet,
+ pub sanitizer_recover: SanitizerSet,
+ pub sanitizer_memory_track_origins: usize,
+
+ // Flags indicating which outputs to produce.
+ pub emit_pre_lto_bc: bool,
+ pub emit_no_opt_bc: bool,
+ pub emit_bc: bool,
+ pub emit_ir: bool,
+ pub emit_asm: bool,
+ pub emit_obj: EmitObj,
+ pub emit_thin_lto: bool,
+ pub bc_cmdline: String,
+
+ // Miscellaneous flags. These are mostly copied from command-line
+ // options.
+ pub verify_llvm_ir: bool,
+ pub no_prepopulate_passes: bool,
+ pub no_builtins: bool,
+ pub time_module: bool,
+ pub vectorize_loop: bool,
+ pub vectorize_slp: bool,
+ pub merge_functions: bool,
+ pub inline_threshold: Option<u32>,
+ pub new_llvm_pass_manager: Option<bool>,
+ pub emit_lifetime_markers: bool,
+ pub llvm_plugins: Vec<String>,
+}
+
+impl ModuleConfig {
+ fn new(
+ kind: ModuleKind,
+ sess: &Session,
+ no_builtins: bool,
+ is_compiler_builtins: bool,
+ ) -> ModuleConfig {
+ // If it's a regular module, use `$regular`, otherwise use `$other`.
+ // `$regular` and `$other` are evaluated lazily.
+ macro_rules! if_regular {
+ ($regular: expr, $other: expr) => {
+ if let ModuleKind::Regular = kind { $regular } else { $other }
+ };
+ }
+
+ let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
+
+ let save_temps = sess.opts.cg.save_temps;
+
+ let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
+ || match kind {
+ ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
+ ModuleKind::Allocator => false,
+ ModuleKind::Metadata => sess.opts.output_types.contains_key(&OutputType::Metadata),
+ };
+
+ let emit_obj = if !should_emit_obj {
+ EmitObj::None
+ } else if sess.target.obj_is_bitcode
+ || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
+ {
+ // This case is selected if the target uses objects as bitcode, or
+ // if linker plugin LTO is enabled. In the linker plugin LTO case
+ // the assumption is that the final link-step will read the bitcode
+ // and convert it to object code. This may be done by either the
+ // native linker or rustc itself.
+ //
+ // Note, however, that the linker-plugin-lto requested here is
+ // explicitly ignored for `#![no_builtins]` crates. These crates are
+ // specifically ignored by rustc's LTO passes and wouldn't work if
+ // loaded into the linker. These crates define symbols that LLVM
+ // lowers intrinsics to, and these symbol dependencies aren't known
+ // until after codegen. As a result any crate marked
+ // `#![no_builtins]` is assumed to not participate in LTO and
+ // instead goes on to generate object code.
+ EmitObj::Bitcode
+ } else if need_bitcode_in_object(sess) {
+ EmitObj::ObjectCode(BitcodeSection::Full)
+ } else {
+ EmitObj::ObjectCode(BitcodeSection::None)
+ };
+
+ ModuleConfig {
+ passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),
+
+ opt_level: opt_level_and_size,
+ opt_size: opt_level_and_size,
+
+ pgo_gen: if_regular!(
+ sess.opts.cg.profile_generate.clone(),
+ SwitchWithOptPath::Disabled
+ ),
+ pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
+ pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
+ debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
+ instrument_coverage: if_regular!(sess.instrument_coverage(), false),
+ instrument_gcov: if_regular!(
+ // compiler_builtins overrides the codegen-units settings,
+ // which is incompatible with -Zprofile, as -Zprofile requires
+ // that only a single codegen unit be used per crate.
+ sess.opts.unstable_opts.profile && !is_compiler_builtins,
+ false
+ ),
+
+ sanitizer: if_regular!(sess.opts.unstable_opts.sanitizer, SanitizerSet::empty()),
+ sanitizer_recover: if_regular!(
+ sess.opts.unstable_opts.sanitizer_recover,
+ SanitizerSet::empty()
+ ),
+ sanitizer_memory_track_origins: if_regular!(
+ sess.opts.unstable_opts.sanitizer_memory_track_origins,
+ 0
+ ),
+
+ emit_pre_lto_bc: if_regular!(
+ save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
+ false
+ ),
+ emit_no_opt_bc: if_regular!(save_temps, false),
+ emit_bc: if_regular!(
+ save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
+ save_temps
+ ),
+ emit_ir: if_regular!(
+ sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
+ false
+ ),
+ emit_asm: if_regular!(
+ sess.opts.output_types.contains_key(&OutputType::Assembly),
+ false
+ ),
+ emit_obj,
+ emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto,
+ bc_cmdline: sess.target.bitcode_llvm_cmdline.to_string(),
+
+ verify_llvm_ir: sess.verify_llvm_ir(),
+ no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
+ no_builtins: no_builtins || sess.target.no_builtins,
+
+ // Exclude metadata and allocator modules from time_passes output,
+ // since they throw off the "LLVM passes" measurement.
+ time_module: if_regular!(true, false),
+
+ // Copy what clang does by turning on loop vectorization at O2 and
+ // SLP vectorization at O3.
+ vectorize_loop: !sess.opts.cg.no_vectorize_loops
+ && (sess.opts.optimize == config::OptLevel::Default
+ || sess.opts.optimize == config::OptLevel::Aggressive),
+ vectorize_slp: !sess.opts.cg.no_vectorize_slp
+ && sess.opts.optimize == config::OptLevel::Aggressive,
+
+ // Some targets (namely, NVPTX) interact badly with the
+ // MergeFunctions pass. This is because MergeFunctions can generate
+ // new function calls which may interfere with the target calling
+ // convention; e.g. for the NVPTX target, PTX kernels should not
+ // call other PTX kernels. MergeFunctions can also be configured to
+ // generate aliases instead, but aliases are not supported by some
+ // backends (again, NVPTX). Therefore, allow targets to opt out of
+ // the MergeFunctions pass, but otherwise keep the pass enabled (at
+ // O2 and O3) since it can be useful for reducing code size.
+ merge_functions: match sess
+ .opts
+ .unstable_opts
+ .merge_functions
+ .unwrap_or(sess.target.merge_functions)
+ {
+ MergeFunctions::Disabled => false,
+ MergeFunctions::Trampolines | MergeFunctions::Aliases => {
+ sess.opts.optimize == config::OptLevel::Default
+ || sess.opts.optimize == config::OptLevel::Aggressive
+ }
+ },
+
+ inline_threshold: sess.opts.cg.inline_threshold,
+ new_llvm_pass_manager: sess.opts.unstable_opts.new_llvm_pass_manager,
+ emit_lifetime_markers: sess.emit_lifetime_markers(),
+ llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
+ }
+ }
+
+ pub fn bitcode_needed(&self) -> bool {
+ self.emit_bc
+ || self.emit_obj == EmitObj::Bitcode
+ || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
+ }
+}
+
+/// Configuration passed to the function returned by the `target_machine_factory`.
+pub struct TargetMachineFactoryConfig {
+ /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
+ /// so the path to the dwarf object has to be provided when we create the target machine.
+ /// This can be ignored by backends which do not need it for their Split DWARF support.
+ pub split_dwarf_file: Option<PathBuf>,
+}
+
+impl TargetMachineFactoryConfig {
+ pub fn new(
+ cgcx: &CodegenContext<impl WriteBackendMethods>,
+ module_name: &str,
+ ) -> TargetMachineFactoryConfig {
+ let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
+ cgcx.output_filenames.split_dwarf_path(
+ cgcx.split_debuginfo,
+ cgcx.split_dwarf_kind,
+ Some(module_name),
+ )
+ } else {
+ None
+ };
+ TargetMachineFactoryConfig { split_dwarf_file }
+ }
+}
+
+pub type TargetMachineFactoryFn<B> = Arc<
+ dyn Fn(TargetMachineFactoryConfig) -> Result<<B as WriteBackendMethods>::TargetMachine, String>
+ + Send
+ + Sync,
+>;
+
+pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+#[derive(Clone)]
+pub struct CodegenContext<B: WriteBackendMethods> {
+ // Resources needed when running LTO
+ pub backend: B,
+ pub prof: SelfProfilerRef,
+ pub lto: Lto,
+ pub save_temps: bool,
+ pub fewer_names: bool,
+ pub time_trace: bool,
+ pub exported_symbols: Option<Arc<ExportedSymbols>>,
+ pub opts: Arc<config::Options>,
+ pub crate_types: Vec<CrateType>,
+ pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
+ pub output_filenames: Arc<OutputFilenames>,
+ pub regular_module_config: Arc<ModuleConfig>,
+ pub metadata_module_config: Arc<ModuleConfig>,
+ pub allocator_module_config: Arc<ModuleConfig>,
+ pub tm_factory: TargetMachineFactoryFn<B>,
+ pub msvc_imps_needed: bool,
+ pub is_pe_coff: bool,
+ pub target_can_use_split_dwarf: bool,
+ pub target_pointer_width: u32,
+ pub target_arch: String,
+ pub debuginfo: config::DebugInfo,
+ pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
+ pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
+
+ // Number of cgus excluding the allocator/metadata modules
+ pub total_cgus: usize,
+ // Handler to use for diagnostics produced during codegen.
+ pub diag_emitter: SharedEmitter,
+ // LLVM optimizations for which we want to print remarks.
+ pub remark: Passes,
+ // Worker thread number
+ pub worker: usize,
+ // The incremental compilation session directory, or None if we are not
+ // compiling incrementally
+ pub incr_comp_session_dir: Option<PathBuf>,
+ // Used to update CGU re-use information during the thinlto phase.
+ pub cgu_reuse_tracker: CguReuseTracker,
+ // Channel used to send messages back to the main control thread.
+ pub coordinator_send: Sender<Box<dyn Any + Send>>,
+}
+
+impl<B: WriteBackendMethods> CodegenContext<B> {
+ pub fn create_diag_handler(&self) -> Handler {
+ Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
+ }
+
+ pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
+ match kind {
+ ModuleKind::Regular => &self.regular_module_config,
+ ModuleKind::Metadata => &self.metadata_module_config,
+ ModuleKind::Allocator => &self.allocator_module_config,
+ }
+ }
+}
+
+fn generate_lto_work<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ needs_fat_lto: Vec<FatLTOInput<B>>,
+ needs_thin_lto: Vec<(String, B::ThinBuffer)>,
+ import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
+) -> Vec<(WorkItem<B>, u64)> {
+ let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
+
+ let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
+ assert!(needs_thin_lto.is_empty());
+ let lto_module =
+ B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
+ (vec![lto_module], vec![])
+ } else {
+ assert!(needs_fat_lto.is_empty());
+ B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
+ };
+
+ lto_modules
+ .into_iter()
+ .map(|module| {
+ let cost = module.cost();
+ (WorkItem::LTO(module), cost)
+ })
+ .chain(copy_jobs.into_iter().map(|wp| {
+ (
+ WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
+ name: wp.cgu_name.clone(),
+ source: wp,
+ }),
+ 0,
+ )
+ }))
+ .collect()
+}
+
+pub struct CompiledModules {
+ pub modules: Vec<CompiledModule>,
+ pub allocator_module: Option<CompiledModule>,
+}
+
+fn need_bitcode_in_object(sess: &Session) -> bool {
+ let requested_for_rlib = sess.opts.cg.embed_bitcode
+ && sess.crate_types().contains(&CrateType::Rlib)
+ && sess.opts.output_types.contains_key(&OutputType::Exe);
+ let forced_by_target = sess.target.forces_embed_bitcode;
+ requested_for_rlib || forced_by_target
+}
+
+fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
+ if sess.opts.incremental.is_none() {
+ return false;
+ }
+
+ match sess.lto() {
+ Lto::No => false,
+ Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
+ }
+}
+
+pub fn start_async_codegen<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ target_cpu: String,
+ metadata: EncodedMetadata,
+ metadata_module: Option<CompiledModule>,
+ total_cgus: usize,
+) -> OngoingCodegen<B> {
+ let (coordinator_send, coordinator_receive) = channel();
+ let sess = tcx.sess;
+
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
+ let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
+
+ let crate_info = CrateInfo::new(tcx, target_cpu);
+
+ let regular_config =
+ ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
+ let metadata_config =
+ ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
+ let allocator_config =
+ ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
+
+ let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+ let (codegen_worker_send, codegen_worker_receive) = channel();
+
+ let coordinator_thread = start_executing_work(
+ backend.clone(),
+ tcx,
+ &crate_info,
+ shared_emitter,
+ codegen_worker_send,
+ coordinator_receive,
+ total_cgus,
+ sess.jobserver.clone(),
+ Arc::new(regular_config),
+ Arc::new(metadata_config),
+ Arc::new(allocator_config),
+ coordinator_send.clone(),
+ );
+
+ OngoingCodegen {
+ backend,
+ metadata,
+ metadata_module,
+ crate_info,
+
+ codegen_worker_receive,
+ shared_emitter_main,
+ coordinator: Coordinator {
+ sender: coordinator_send,
+ future: Some(coordinator_thread),
+ phantom: PhantomData,
+ },
+ output_filenames: tcx.output_filenames(()).clone(),
+ }
+}
+
+fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
+ sess: &Session,
+ compiled_modules: &CompiledModules,
+) -> FxHashMap<WorkProductId, WorkProduct> {
+ let mut work_products = FxHashMap::default();
+
+ if sess.opts.incremental.is_none() {
+ return work_products;
+ }
+
+ let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
+
+ for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
+ let mut files = Vec::new();
+ if let Some(object_file_path) = &module.object {
+ files.push(("o", object_file_path.as_path()));
+ }
+ if let Some(dwarf_object_file_path) = &module.dwarf_object {
+ files.push(("dwo", dwarf_object_file_path.as_path()));
+ }
+
+ if let Some((id, product)) =
+ copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, files.as_slice())
+ {
+ work_products.insert(id, product);
+ }
+ }
+
+ work_products
+}
+
+fn produce_final_output_artifacts(
+ sess: &Session,
+ compiled_modules: &CompiledModules,
+ crate_output: &OutputFilenames,
+) {
+ let mut user_wants_bitcode = false;
+ let mut user_wants_objects = false;
+
+ // Produce final compile outputs.
+ let copy_gracefully = |from: &Path, to: &Path| {
+ if let Err(e) = fs::copy(from, to) {
+ sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
+ }
+ };
+
+ let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
+ if compiled_modules.modules.len() == 1 {
+ // 1) Only one codegen unit. In this case it's trivial to copy
+ // `foo.0.x` to `foo.x`.
+ let module_name = Some(&compiled_modules.modules[0].name[..]);
+ let path = crate_output.temp_path(output_type, module_name);
+ copy_gracefully(&path, &crate_output.path(output_type));
+ if !sess.opts.cg.save_temps && !keep_numbered {
+ // The user just wants `foo.x`, not `foo.#module-name#.x`.
+ ensure_removed(sess.diagnostic(), &path);
+ }
+ } else {
+ let ext = crate_output
+ .temp_path(output_type, None)
+ .extension()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .to_owned();
+
+ if crate_output.outputs.contains_key(&output_type) {
+ // 2) Multiple codegen units, with `--emit foo=some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(&format!(
+ "ignoring emit path because multiple .{} files \
+ were produced",
+ ext
+ ));
+ } else if crate_output.single_output_file.is_some() {
+ // 3) Multiple codegen units, with `-o some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(&format!(
+ "ignoring -o because multiple .{} files \
+ were produced",
+ ext
+ ));
+ } else {
+ // 4) Multiple codegen units, but no explicit name. We
+ // just leave the `foo.0.x` files in place.
+ // (We don't have to do any work in this case.)
+ }
+ }
+ };
+
+ // Flag to indicate whether the user explicitly requested bitcode.
+ // Otherwise, we produced it only as a temporary output, and will need
+ // to get rid of it.
+ for output_type in crate_output.outputs.keys() {
+ match *output_type {
+ OutputType::Bitcode => {
+ user_wants_bitcode = true;
+ // Copy to .bc, but always keep the .0.bc. There is a later
+ // check to figure out if we should delete .0.bc files, or keep
+ // them for making an rlib.
+ copy_if_one_unit(OutputType::Bitcode, true);
+ }
+ OutputType::LlvmAssembly => {
+ copy_if_one_unit(OutputType::LlvmAssembly, false);
+ }
+ OutputType::Assembly => {
+ copy_if_one_unit(OutputType::Assembly, false);
+ }
+ OutputType::Object => {
+ user_wants_objects = true;
+ copy_if_one_unit(OutputType::Object, true);
+ }
+ OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
+ }
+ }
+
+ // Clean up unwanted temporary files.
+
+ // We create the following files by default:
+ // - #crate#.#module-name#.bc
+ // - #crate#.#module-name#.o
+ // - #crate#.crate.metadata.bc
+ // - #crate#.crate.metadata.o
+ // - #crate#.o (linked from crate.##.o)
+ // - #crate#.bc (copied from crate.##.bc)
+ // We may create additional files if requested by the user (through
+ // `-C save-temps` or `--emit=` flags).
+
+ if !sess.opts.cg.save_temps {
+ // Remove the temporary .#module-name#.o objects. If the user didn't
+ // explicitly request bitcode (with --emit=bc), and the bitcode is not
+ // needed for building an rlib, then we must remove .#module-name#.bc as
+ // well.
+
+ // Specific rules for keeping .#module-name#.bc:
+ // - If the user requested bitcode (`user_wants_bitcode`), and
+ // codegen_units > 1, then keep it.
+ // - If the user requested bitcode but codegen_units == 1, then we
+ // can toss .#module-name#.bc because we copied it to .bc earlier.
+ // - If we're not building an rlib and the user didn't request
+ // bitcode, then delete .#module-name#.bc.
+ // If you change how this works, also update back::link::link_rlib,
+ // where .#module-name#.bc files are (maybe) deleted after making an
+ // rlib.
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
+
+ let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
+
+ let keep_numbered_objects =
+ needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);
+
+ for module in compiled_modules.modules.iter() {
+ if let Some(ref path) = module.object {
+ if !keep_numbered_objects {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+
+ if let Some(ref path) = module.dwarf_object {
+ if !keep_numbered_objects {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+
+ if let Some(ref path) = module.bytecode {
+ if !keep_numbered_bitcode {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+ }
+
+ if !user_wants_bitcode {
+ if let Some(ref allocator_module) = compiled_modules.allocator_module {
+ if let Some(ref path) = allocator_module.bytecode {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+ }
+ }
+
+ // We leave the following files around by default:
+ // - #crate#.o
+ // - #crate#.crate.metadata.o
+ // - #crate#.bc
+ // These are used in linking steps and will be cleaned up afterward.
+}
+
+pub enum WorkItem<B: WriteBackendMethods> {
+ /// Optimize a newly codegened, totally unoptimized module.
+ Optimize(ModuleCodegen<B::Module>),
+ /// Copy the post-LTO artifacts from the incremental cache to the output
+ /// directory.
+ CopyPostLtoArtifacts(CachedModuleCodegen),
+ /// Performs (Thin)LTO on the given module.
+ LTO(lto::LtoModuleCodegen<B>),
+}
+
+impl<B: WriteBackendMethods> WorkItem<B> {
+ pub fn module_kind(&self) -> ModuleKind {
+ match *self {
+ WorkItem::Optimize(ref m) => m.kind,
+ WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
+ }
+ }
+
+ fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
+ match *self {
+ WorkItem::Optimize(ref m) => {
+ cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name)
+ }
+ WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
+ .prof
+ .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*m.name),
+ WorkItem::LTO(ref m) => {
+ cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
+ }
+ }
+ }
+
+ /// Generate a short description of this work item suitable for use as a thread name.
+ fn short_description(&self) -> String {
+ // `pthread_setname()` on *nix is limited to 15 characters and longer names are ignored.
+ // Use very short descriptions in this case to maximize the space available for the module name.
+ // Windows does not have that limitation so use slightly more descriptive names there.
+ match self {
+ WorkItem::Optimize(m) => {
+ #[cfg(windows)]
+ return format!("optimize module {}", m.name);
+ #[cfg(not(windows))]
+ return format!("opt {}", m.name);
+ }
+ WorkItem::CopyPostLtoArtifacts(m) => {
+ #[cfg(windows)]
+ return format!("copy LTO artifacts for {}", m.name);
+ #[cfg(not(windows))]
+ return format!("copy {}", m.name);
+ }
+ WorkItem::LTO(m) => {
+ #[cfg(windows)]
+ return format!("LTO module {}", m.name());
+ #[cfg(not(windows))]
+ return format!("LTO {}", m.name());
+ }
+ }
+ }
+}
+
+enum WorkItemResult<B: WriteBackendMethods> {
+ Compiled(CompiledModule),
+ NeedsLink(ModuleCodegen<B::Module>),
+ NeedsFatLTO(FatLTOInput<B>),
+ NeedsThinLTO(String, B::ThinBuffer),
+}
+
+pub enum FatLTOInput<B: WriteBackendMethods> {
+ Serialized { name: String, buffer: B::ModuleBuffer },
+ InMemory(ModuleCodegen<B::Module>),
+}
+
+fn execute_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ work_item: WorkItem<B>,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let module_config = cgcx.config(work_item.module_kind());
+
+ match work_item {
+ WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
+ WorkItem::CopyPostLtoArtifacts(module) => {
+ Ok(execute_copy_from_cache_work_item(cgcx, module, module_config))
+ }
+ WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
+ }
+}
+
+// Actual LTO type we end up choosing based on multiple factors.
+pub enum ComputedLtoType {
+ No,
+ Thin,
+ Fat,
+}
+
+pub fn compute_per_cgu_lto_type(
+ sess_lto: &Lto,
+ opts: &config::Options,
+ sess_crate_types: &[CrateType],
+ module_kind: ModuleKind,
+) -> ComputedLtoType {
+ // Metadata modules never participate in LTO regardless of the lto
+ // settings.
+ if module_kind == ModuleKind::Metadata {
+ return ComputedLtoType::No;
+ }
+
+ // If the linker does LTO, we don't have to do it. Note that we keep
+ // doing full LTO, if it is requested, so as not to break the assumption
+ // that the output will be a single module.
+ let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
+
+ // When we're automatically doing ThinLTO for multi-codegen-unit builds
+ // we don't actually want to LTO the allocator module if it shows up.
+ // This is due to various linker shenanigans that we'll encounter later.
+ let is_allocator = module_kind == ModuleKind::Allocator;
+
+ // We ignore a request for full crate graph LTO if the crate type is only
+ // an rlib, as there is no full crate graph to process; that'll happen
+ // later.
+ //
+ // This use case currently comes up primarily for targets that require
+ // LTO, where the request for LTO is unconditionally passed down to the
+ // backend, but we don't actually want to do anything about it until
+ // we've got a final product.
+ let is_rlib = sess_crate_types.len() == 1 && sess_crate_types[0] == CrateType::Rlib;
+
+ match sess_lto {
+ Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+ Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
+ Lto::Fat if !is_rlib => ComputedLtoType::Fat,
+ _ => ComputedLtoType::No,
+ }
+}
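+
+// An illustrative sketch of how the match above resolves (the `opts` value is
+// an assumed session configuration with `-Clinker-plugin-lto` disabled):
+//
+//     // `-Clto=thin` while building an executable: ThinLTO per CGU.
+//     compute_per_cgu_lto_type(&Lto::Thin, &opts, &[CrateType::Executable], ModuleKind::Regular)
+//         // => ComputedLtoType::Thin
+//
+//     // The same request while building only an rlib is deferred until the
+//     // final crate graph is linked: no per-CGU LTO yet.
+//     compute_per_cgu_lto_type(&Lto::Thin, &opts, &[CrateType::Rlib], ModuleKind::Regular)
+//         // => ComputedLtoType::No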
+
+fn execute_optimize_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: ModuleCodegen<B::Module>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ unsafe {
+ B::optimize(cgcx, &diag_handler, &module, module_config)?;
+ }
+
+ // After we've done the initial round of optimizations we need to
+ // decide whether to synchronously codegen this module or ship it
+ // back to the coordinator thread for further LTO processing (which
+ // has to wait for all the initial modules to be optimized).
+
+ let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+
+ // If we're doing some form of incremental LTO then we need to be sure to
+ // save our module to disk first.
+ let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
+ let filename = pre_lto_bitcode_filename(&module.name);
+ cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
+ } else {
+ None
+ };
+
+ match lto_type {
+ ComputedLtoType::No => finish_intra_module_work(cgcx, module, module_config),
+ ComputedLtoType::Thin => {
+ let (name, thin_buffer) = B::prepare_thin(module);
+ if let Some(path) = bitcode {
+ fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
+ panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+ });
+ }
+ Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))
+ }
+ ComputedLtoType::Fat => match bitcode {
+ Some(path) => {
+ let (name, buffer) = B::serialize_module(module);
+ fs::write(&path, buffer.data()).unwrap_or_else(|e| {
+ panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+ });
+ Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }))
+ }
+ None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))),
+ },
+ }
+}
+
+fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: CachedModuleCodegen,
+ module_config: &ModuleConfig,
+) -> WorkItemResult<B> {
+ assert!(module_config.emit_obj != EmitObj::None);
+
+ let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
+
+ let load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
+ let source_file = in_incr_comp_dir(&incr_comp_session_dir, saved_path);
+ debug!(
+ "copying pre-existing module `{}` from {:?} to {}",
+ module.name,
+ source_file,
+ output_path.display()
+ );
+ match link_or_copy(&source_file, &output_path) {
+ Ok(_) => Some(output_path),
+ Err(err) => {
+ let diag_handler = cgcx.create_diag_handler();
+ diag_handler.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ output_path.display(),
+ err
+ ));
+ None
+ }
+ }
+ };
+
+ let object = load_from_incr_comp_dir(
+ cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name)),
+ &module.source.saved_files.get("o").expect("no saved object file in work product"),
+ );
+ let dwarf_object =
+ module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
+ let dwarf_obj_out = cgcx
+ .output_filenames
+ .split_dwarf_path(cgcx.split_debuginfo, cgcx.split_dwarf_kind, Some(&module.name))
+ .expect(
+ "saved dwarf object in work product but `split_dwarf_path` returned `None`",
+ );
+ load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
+ });
+
+ WorkItemResult::Compiled(CompiledModule {
+ name: module.name,
+ kind: ModuleKind::Regular,
+ object,
+ dwarf_object,
+ bytecode: None,
+ })
+}
+
+fn execute_lto_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: lto::LtoModuleCodegen<B>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let module = unsafe { module.optimize(cgcx)? };
+ finish_intra_module_work(cgcx, module, module_config)
+}
+
+fn finish_intra_module_work<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: ModuleCodegen<B::Module>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ if !cgcx.opts.unstable_opts.combine_cgu
+ || module.kind == ModuleKind::Metadata
+ || module.kind == ModuleKind::Allocator
+ {
+ let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
+ Ok(WorkItemResult::Compiled(module))
+ } else {
+ Ok(WorkItemResult::NeedsLink(module))
+ }
+}
+
+pub enum Message<B: WriteBackendMethods> {
+ Token(io::Result<Acquired>),
+ NeedsFatLTO {
+ result: FatLTOInput<B>,
+ worker_id: usize,
+ },
+ NeedsThinLTO {
+ name: String,
+ thin_buffer: B::ThinBuffer,
+ worker_id: usize,
+ },
+ NeedsLink {
+ module: ModuleCodegen<B::Module>,
+ worker_id: usize,
+ },
+ Done {
+ result: Result<CompiledModule, Option<WorkerFatalError>>,
+ worker_id: usize,
+ },
+ CodegenDone {
+ llvm_work_item: WorkItem<B>,
+ cost: u64,
+ },
+ AddImportOnlyModule {
+ module_data: SerializedModule<B::ModuleBuffer>,
+ work_product: WorkProduct,
+ },
+ CodegenComplete,
+ CodegenItem,
+ CodegenAborted,
+}
+
+struct Diagnostic {
+ msg: String,
+ code: Option<DiagnosticId>,
+ lvl: Level,
+}
+
+#[derive(PartialEq, Clone, Copy, Debug)]
+enum MainThreadWorkerState {
+ Idle,
+ Codegenning,
+ LLVMing,
+}
+
+fn start_executing_work<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ crate_info: &CrateInfo,
+ shared_emitter: SharedEmitter,
+ codegen_worker_send: Sender<Message<B>>,
+ coordinator_receive: Receiver<Box<dyn Any + Send>>,
+ total_cgus: usize,
+ jobserver: Client,
+ regular_config: Arc<ModuleConfig>,
+ metadata_config: Arc<ModuleConfig>,
+ allocator_config: Arc<ModuleConfig>,
+ tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
+) -> thread::JoinHandle<Result<CompiledModules, ()>> {
+ let coordinator_send = tx_to_llvm_workers;
+ let sess = tcx.sess;
+
+ // Compute the set of symbols we need to retain when doing LTO (if we need to)
+ let exported_symbols = {
+ let mut exported_symbols = FxHashMap::default();
+
+ let copy_symbols = |cnum| {
+ let symbols = tcx
+ .exported_symbols(cnum)
+ .iter()
+ .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
+ .collect();
+ Arc::new(symbols)
+ };
+
+ match sess.lto() {
+ Lto::No => None,
+ Lto::ThinLocal => {
+ exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+ Some(Arc::new(exported_symbols))
+ }
+ Lto::Fat | Lto::Thin => {
+ exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+ for &cnum in tcx.crates(()).iter() {
+ exported_symbols.insert(cnum, copy_symbols(cnum));
+ }
+ Some(Arc::new(exported_symbols))
+ }
+ }
+ };
+
+ // First up, convert our jobserver into a helper thread so we can use normal
+ // mpsc channels to manage our messages and such.
+ // Once we've requested tokens, they will arrive on `coordinator_receive`
+ // and get managed in the main loop below.
+ let coordinator_send2 = coordinator_send.clone();
+ let helper = jobserver
+ .into_helper_thread(move |token| {
+ drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
+ })
+ .expect("failed to spawn helper thread");
+
+ let mut each_linked_rlib_for_lto = Vec::new();
+ drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
+ if link::ignored_for_lto(sess, crate_info, cnum) {
+ return;
+ }
+ each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+ }));
+
+ let ol =
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ // If we know that we won’t be doing codegen, create target machines without optimisation.
+ config::OptLevel::No
+ } else {
+ tcx.backend_optimization_level(())
+ };
+ let backend_features = tcx.global_backend_features(());
+ let cgcx = CodegenContext::<B> {
+ backend: backend.clone(),
+ crate_types: sess.crate_types().to_vec(),
+ each_linked_rlib_for_lto,
+ lto: sess.lto(),
+ fewer_names: sess.fewer_names(),
+ save_temps: sess.opts.cg.save_temps,
+ time_trace: sess.opts.unstable_opts.llvm_time_trace,
+ opts: Arc::new(sess.opts.clone()),
+ prof: sess.prof.clone(),
+ exported_symbols,
+ remark: sess.opts.cg.remark.clone(),
+ worker: 0,
+ incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
+ cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
+ coordinator_send,
+ diag_emitter: shared_emitter.clone(),
+ output_filenames: tcx.output_filenames(()).clone(),
+ regular_module_config: regular_config,
+ metadata_module_config: metadata_config,
+ allocator_module_config: allocator_config,
+ tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
+ total_cgus,
+ msvc_imps_needed: msvc_imps_needed(tcx),
+ is_pe_coff: tcx.sess.target.is_like_windows,
+ target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
+ target_pointer_width: tcx.sess.target.pointer_width,
+ target_arch: tcx.sess.target.arch.to_string(),
+ debuginfo: tcx.sess.opts.debuginfo,
+ split_debuginfo: tcx.sess.split_debuginfo(),
+ split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ };
+
+ // This is the "main loop" of parallel work happening for parallel codegen.
+ // It's here that we manage parallelism, schedule work, and work with
+ // messages coming from clients.
+ //
+ // There are a few environmental pre-conditions that shape how the system
+ // is set up:
+ //
+ // - Error reporting only can happen on the main thread because that's the
+ // only place where we have access to the compiler `Session`.
+ // - LLVM work can be done on any thread.
+ // - Codegen can only happen on the main thread.
+ // - Each thread doing substantial work must be in possession of a `Token`
+ // from the `Jobserver`.
+ // - The compiler process always holds one `Token`. Any additional `Tokens`
+ // have to be requested from the `Jobserver`.
+ //
+ // Error Reporting
+ // ===============
+ // The error reporting restriction is handled separately from the rest: We
+ // set up a `SharedEmitter` that holds an open channel to the main thread.
+ // When an error occurs on any thread, the shared emitter will send the
+ // error message to the receiver on the main thread (`SharedEmitterMain`). The
+ // main thread will periodically query this error message queue and emit
+ // any error messages it has received. It might even abort compilation if it
+ // has received a fatal error. In this case we rely on all other threads
+ // being torn down automatically with the main thread.
+ // Since the main thread will often be busy doing codegen work, error
+ // reporting will be somewhat delayed, since the message queue can only be
+ // checked in between two work packages.
+ //
+ // Work Processing Infrastructure
+ // ==============================
+ // The work processing infrastructure knows three major actors:
+ //
+ // - the coordinator thread,
+ // - the main thread, and
+ // - LLVM worker threads
+ //
+ // The coordinator thread is running a message loop. It instructs the main
+ // thread about what work to do when, and it will spawn off LLVM worker
+ // threads as open LLVM WorkItems become available.
+ //
+ // The job of the main thread is to codegen CGUs into LLVM work packages
+ // (since the main thread is the only thread that can do this). The main
+ // thread will block until it receives a message from the coordinator, upon
+ // which it will codegen one CGU, send it to the coordinator and block
+ // again. This way the coordinator can control what the main thread is
+ // doing.
+ //
+ // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
+ // available, it will spawn off a new LLVM worker thread and let it process
+ // a WorkItem. When an LLVM worker thread is done with its WorkItem,
+ // it will just shut down, which also frees all resources associated with
+ // the given LLVM module, and sends a message to the coordinator that the
+ // WorkItem has been completed.
+ //
+ // Work Scheduling
+ // ===============
+ // The scheduler's goal is to minimize the time it takes to complete all
+ // work there is; however, we also want to keep memory consumption low
+ // if possible. These two goals are at odds with each other: If memory
+ // consumption were not an issue, we could just let the main thread produce
+ // LLVM WorkItems at full speed, assuring maximal utilization of
+ // Tokens/LLVM worker threads. However, since codegen is usually faster
+ // than LLVM processing, the queue of LLVM WorkItems would fill up and each
+ // WorkItem potentially holds on to a substantial amount of memory.
+ //
+ // So the actual goal is to always produce just enough LLVM WorkItems so as
+ // not to starve our LLVM worker threads. That means, once we have enough
+ // WorkItems in our queue, we can block the main thread, so it does not
+ // produce more until we need them.
+ //
+ // Doing LLVM Work on the Main Thread
+ // ----------------------------------
+ // Since the main thread owns the compiler process's implicit `Token`, it is
+ // wasteful to keep it blocked without doing any work. Therefore, what we do
+ // in this case is: We spawn off an additional LLVM worker thread that helps
+ // reduce the queue. The work it is doing corresponds to the implicit
+ // `Token`. The coordinator will mark the main thread as being busy with
+ // LLVM work. (The actual work happens on another OS thread but we just care
+ // about `Tokens`, not actual threads).
+ //
+ // When any LLVM worker thread finishes while the main thread is marked as
+ // "busy with LLVM work", we can do a little switcheroo: We give the Token
+ // of the just finished thread to the LLVM worker thread that is working on
+ // behalf of the main thread's implicit Token, thus freeing up the main
+ // thread again. The coordinator can then again decide what the main thread
+ // should do. This allows the coordinator to make decisions at more points
+ // in time.
+ //
+ // Striking a Balance between Throughput and Memory Consumption
+ // ------------------------------------------------------------
+ // Since our two goals, (1) use as many Tokens as possible and (2) keep
+ // memory consumption as low as possible, are in conflict with each other,
+ // we have to find a trade-off between them. Right now, the goal is to keep
+ // all workers busy, which means that no worker should find the queue empty
+ // when it is ready to start.
+ // How do we achieve this? Good question :) We actually never know how
+ // many `Tokens` are potentially available so it's hard to say how much to
+ // fill up the queue before switching the main thread to LLVM work. Also we
+ // currently don't have a means to estimate how long a running LLVM worker
+ // will still be busy with its current WorkItem. However, we know the
+ // maximal count of available Tokens that makes sense (=the number of CPU
+ // cores), so we can take a conservative guess. The heuristic we use here
+ // is implemented in the `queue_full_enough()` function.
+ //
+ // Some Background on Jobservers
+ // -----------------------------
+ // It's worth also touching on the management of parallelism here. We don't
+ // want to just spawn a thread per work item because while that's optimal
+ // parallelism it may overload a system with too many threads or violate our
+ // configuration for the maximum amount of CPU to use for this process. To
+ // manage this we use the `jobserver` crate.
+ //
+ // Job servers are an artifact of GNU make and are used to manage
+ // parallelism between processes. A jobserver is a glorified IPC semaphore
+ // basically. Whenever we want to run some work we acquire the semaphore,
+ // and whenever we're done with that work we release the semaphore. In this
+ // manner we can ensure that the maximum number of parallel workers is
+ // capped at any one point in time.
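+ //
+ // As a minimal sketch of that idea with the `jobserver` crate (a standalone
+ // illustration, not how this module is wired up):
+ //
+ //     let client = jobserver::Client::new(4).expect("failed to create jobserver");
+ //     let token = client.acquire().expect("failed to acquire token"); // semaphore down
+ //     // ... do one unit of parallel work while holding the token ...
+ //     drop(token); // semaphore up: the slot becomes available to others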
+ //
+ // LTO and the coordinator thread
+ // ------------------------------
+ //
+ // The final job the coordinator thread is responsible for is managing LTO
+ // and how that works. When LTO is requested what we'll do is collect all
+ // optimized LLVM modules into a local vector on the coordinator. Once all
+ // modules have been codegened and optimized we hand this to the `lto`
+ // module for further optimization. The `lto` module will return a list
+ // of more modules to work on, which the coordinator will continue to spawn
+ // work for.
+ //
+ // Each LLVM module is automatically sent back to the coordinator for LTO if
+ // necessary. There are already optimizations in place to avoid sending work
+ // back to the coordinator if LTO isn't requested.
+ return B::spawn_thread(cgcx.time_trace, move || {
+ let mut worker_id_counter = 0;
+ let mut free_worker_ids = Vec::new();
+ let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
+ if let Some(id) = free_worker_ids.pop() {
+ id
+ } else {
+ let id = worker_id_counter;
+ worker_id_counter += 1;
+ id
+ }
+ };
+
+ // This is where we collect codegen units that have gone all the way
+ // through codegen and LLVM.
+ let mut compiled_modules = vec![];
+ let mut compiled_allocator_module = None;
+ let mut needs_link = Vec::new();
+ let mut needs_fat_lto = Vec::new();
+ let mut needs_thin_lto = Vec::new();
+ let mut lto_import_only_modules = Vec::new();
+ let mut started_lto = false;
+ let mut codegen_aborted = false;
+
+ // This flag tracks whether all items have gone through codegen.
+ let mut codegen_done = false;
+
+ // This is the queue of LLVM work items that still need processing.
+ let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
+
+ // These are the jobserver Tokens we currently hold. This does not include
+ // the implicit Token the compiler process owns no matter what.
+ let mut tokens = Vec::new();
+
+ let mut main_thread_worker_state = MainThreadWorkerState::Idle;
+ let mut running = 0;
+
+ let prof = &cgcx.prof;
+ let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
+
+ // Run the message loop while there's still anything that needs message
+ // processing. Note that as soon as codegen is aborted we simply want to
+ // wait for all existing work to finish, so many of the conditions here
+ // only apply if codegen hasn't been aborted as they represent pending
+ // work to be done.
+ while !codegen_done
+ || running > 0
+ || main_thread_worker_state == MainThreadWorkerState::LLVMing
+ || (!codegen_aborted
+ && !(work_items.is_empty()
+ && needs_fat_lto.is_empty()
+ && needs_thin_lto.is_empty()
+ && lto_import_only_modules.is_empty()
+ && main_thread_worker_state == MainThreadWorkerState::Idle))
+ {
+ // While there are still CGUs to be codegened, the coordinator has
+ // to decide how to utilize the compiler process's implicit Token:
+ // for codegenning more CGUs or for running them through LLVM.
+ if !codegen_done {
+ if main_thread_worker_state == MainThreadWorkerState::Idle {
+ // Compute the number of workers that will be running once we've taken as many
+ // items from the work queue as we can, plus one for the main thread. It's not
+ // critically important that we use this instead of just `running`, but it
+ // prevents the `queue_full_enough` heuristic from fluctuating just because a
+ // worker finished up and we decreased the `running` count, even though we're
+ // just going to increase it right after this when we put a new worker to work.
+ let extra_tokens = tokens.len().checked_sub(running).unwrap();
+ let additional_running = std::cmp::min(extra_tokens, work_items.len());
+ let anticipated_running = running + additional_running + 1;
+
+ if !queue_full_enough(work_items.len(), anticipated_running) {
+ // The queue is not full enough, codegen more items:
+ if codegen_worker_send.send(Message::CodegenItem).is_err() {
+ panic!("Could not send Message::CodegenItem to main thread")
+ }
+ main_thread_worker_state = MainThreadWorkerState::Codegenning;
+ } else {
+ // The queue is full enough to not let the worker
+ // threads starve. Use the implicit Token to do some
+ // LLVM work too.
+ let (item, _) =
+ work_items.pop().expect("queue empty - queue_full_enough() broken?");
+ let cgcx = CodegenContext {
+ worker: get_worker_id(&mut free_worker_ids),
+ ..cgcx.clone()
+ };
+ maybe_start_llvm_timer(
+ prof,
+ cgcx.config(item.module_kind()),
+ &mut llvm_start_time,
+ );
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ spawn_work(cgcx, item);
+ }
+ }
+ } else if codegen_aborted {
+ // Don't queue up any more work if codegen was aborted; we're
+ // just waiting for our existing children to finish.
+ } else {
+ // If we've finished everything related to normal codegen
+ // then it must be the case that we've got some LTO work to do.
+ // Perform the serial work here of figuring out what we're
+ // going to LTO and then push a bunch of work items onto our
+ // queue to do LTO
+ if work_items.is_empty()
+ && running == 0
+ && main_thread_worker_state == MainThreadWorkerState::Idle
+ {
+ assert!(!started_lto);
+ started_lto = true;
+
+ let needs_fat_lto = mem::take(&mut needs_fat_lto);
+ let needs_thin_lto = mem::take(&mut needs_thin_lto);
+ let import_only_modules = mem::take(&mut lto_import_only_modules);
+
+ for (work, cost) in
+ generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
+ {
+ let insertion_index = work_items
+ .binary_search_by_key(&cost, |&(_, cost)| cost)
+ .unwrap_or_else(|e| e);
+ work_items.insert(insertion_index, (work, cost));
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
+ helper.request_token();
+ }
+ }
+ }
+
+ // In this branch, we know that everything has been codegened,
+ // so it's just a matter of determining whether the implicit
+ // Token is free to use for LLVM work.
+ match main_thread_worker_state {
+ MainThreadWorkerState::Idle => {
+ if let Some((item, _)) = work_items.pop() {
+ let cgcx = CodegenContext {
+ worker: get_worker_id(&mut free_worker_ids),
+ ..cgcx.clone()
+ };
+ maybe_start_llvm_timer(
+ prof,
+ cgcx.config(item.module_kind()),
+ &mut llvm_start_time,
+ );
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ spawn_work(cgcx, item);
+ } else {
+ // There is no unstarted work, so let the main thread
+ // take over for a running worker. Otherwise the
+ // implicit token would just go to waste.
+ // We reduce the `running` counter by one. The
+ // `tokens.truncate()` below will take care of
+ // giving the Token back.
+ debug_assert!(running > 0);
+ running -= 1;
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ }
+ }
+ MainThreadWorkerState::Codegenning => bug!(
+ "codegen worker should not be codegenning after \
+ codegen was already completed"
+ ),
+ MainThreadWorkerState::LLVMing => {
+ // Already making good use of that token
+ }
+ }
+ }
+
+ // Spin up what work we can, only doing this while we've got available
+ // parallelism slots and work left to spawn.
+ while !codegen_aborted && !work_items.is_empty() && running < tokens.len() {
+ let (item, _) = work_items.pop().unwrap();
+
+ maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
+
+ let cgcx =
+ CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
+
+ spawn_work(cgcx, item);
+ running += 1;
+ }
+
+ // Relinquish accidentally acquired extra tokens
+ tokens.truncate(running);
+
+ // If a thread exits successfully then we drop a token associated
+ // with that worker and update our `running` count. We may later
+ // re-acquire a token to continue running more work. We may also not
+ // actually drop a token here if the worker was running with an
+ // "ephemeral token"
+ let mut free_worker = |worker_id| {
+ if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ } else {
+ running -= 1;
+ }
+
+ free_worker_ids.push(worker_id);
+ };
+
+ let msg = coordinator_receive.recv().unwrap();
+ match *msg.downcast::<Message<B>>().ok().unwrap() {
+ // Save the token locally and the next turn of the loop will use
+ // this to spawn a new unit of work, or it may get dropped
+ // immediately if we have no more work to spawn.
+ Message::Token(token) => {
+ match token {
+ Ok(token) => {
+ tokens.push(token);
+
+ if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ // If the main thread token is used for LLVM work
+ // at the moment, we turn that thread into a regular
+ // LLVM worker thread, so the main thread is free
+ // to react to codegen demand.
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ running += 1;
+ }
+ }
+ Err(e) => {
+ let msg = &format!("failed to acquire jobserver token: {}", e);
+ shared_emitter.fatal(msg);
+ // Exit the coordinator thread
+ panic!("{}", msg)
+ }
+ }
+ }
+
+ Message::CodegenDone { llvm_work_item, cost } => {
+ // We keep the queue sorted by estimated processing cost,
+ // so that more expensive items are processed earlier. This
+ // is good for throughput as it gives the main thread more
+ // time to fill up the queue and it avoids scheduling
+ // expensive items to the end.
+ // Note, however, that this is not ideal for memory
+ // consumption, as LLVM module sizes are not evenly
+ // distributed.
+ let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
+ let insertion_index = match insertion_index {
+ Ok(idx) | Err(idx) => idx,
+ };
+ work_items.insert(insertion_index, (llvm_work_item, cost));
+
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
+ helper.request_token();
+ }
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+
+ Message::CodegenComplete => {
+ codegen_done = true;
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+
+ // If codegen is aborted that means translation was aborted due
+ // to some normal-ish compiler error. In this situation we want
+ // to exit as soon as possible, but we want to make sure all
+ // existing work has finished. Flag codegen as being done, and
+ // then conditions above will ensure no more work is spawned but
+ // we'll keep executing this loop until `running` hits 0.
+ Message::CodegenAborted => {
+ codegen_done = true;
+ codegen_aborted = true;
+ }
+ Message::Done { result: Ok(compiled_module), worker_id } => {
+ free_worker(worker_id);
+ match compiled_module.kind {
+ ModuleKind::Regular => {
+ compiled_modules.push(compiled_module);
+ }
+ ModuleKind::Allocator => {
+ assert!(compiled_allocator_module.is_none());
+ compiled_allocator_module = Some(compiled_module);
+ }
+ ModuleKind::Metadata => bug!("Should be handled separately"),
+ }
+ }
+ Message::NeedsLink { module, worker_id } => {
+ free_worker(worker_id);
+ needs_link.push(module);
+ }
+ Message::NeedsFatLTO { result, worker_id } => {
+ assert!(!started_lto);
+ free_worker(worker_id);
+ needs_fat_lto.push(result);
+ }
+ Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
+ assert!(!started_lto);
+ free_worker(worker_id);
+ needs_thin_lto.push((name, thin_buffer));
+ }
+ Message::AddImportOnlyModule { module_data, work_product } => {
+ assert!(!started_lto);
+ assert!(!codegen_done);
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ lto_import_only_modules.push((module_data, work_product));
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+ // If the thread failed that means it panicked, so we abort immediately.
+ Message::Done { result: Err(None), worker_id: _ } => {
+ bug!("worker thread panicked");
+ }
+ Message::Done { result: Err(Some(WorkerFatalError)), worker_id } => {
+ // Similar to CodegenAborted, wait for remaining work to finish.
+ free_worker(worker_id);
+ codegen_done = true;
+ codegen_aborted = true;
+ }
+ Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
+ }
+ }
+
+ if codegen_aborted {
+ return Err(());
+ }
+
+ let needs_link = mem::take(&mut needs_link);
+ if !needs_link.is_empty() {
+ assert!(compiled_modules.is_empty());
+ let diag_handler = cgcx.create_diag_handler();
+ let module = B::run_link(&cgcx, &diag_handler, needs_link).map_err(|_| ())?;
+ let module = unsafe {
+ B::codegen(&cgcx, &diag_handler, module, cgcx.config(ModuleKind::Regular))
+ .map_err(|_| ())?
+ };
+ compiled_modules.push(module);
+ }
+
+ // Drop to print timings
+ drop(llvm_start_time);
+
+ // Regardless of what order these modules completed in, report them to
+ // the backend in the same order every time to ensure that we're handing
+ // out deterministic results.
+ compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
+
+ Ok(CompiledModules {
+ modules: compiled_modules,
+ allocator_module: compiled_allocator_module,
+ })
+ });
+
+ // A heuristic that determines if we have enough LLVM WorkItems in the
+ // queue so that the main thread can do LLVM work instead of codegen
+ fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
+ // This heuristic scales ahead-of-time codegen according to available
+ // concurrency, as measured by `workers_running`. The idea is that the
+ // more concurrency we have available, the more demand there will be for
+ // work items, and the fuller the queue should be kept to meet demand.
+ // An important property of this approach is that we codegen ahead of
+ // time only as much as necessary, so as to keep fewer LLVM modules in
+ // memory at once, thereby reducing memory consumption.
+ //
+ // When the number of workers running is less than the max concurrency
+ // available to us, this heuristic can cause us to instruct the main
+ // thread to work on an LLVM item (that is, tell it to "LLVM") instead
+ // of codegen, even though it seems like it *should* be codegenning so
+ // that we can create more work items and spawn more LLVM workers.
+ //
+ // But this is not a problem. When the main thread is told to LLVM,
+ // according to this heuristic and how work is scheduled, there is
+ // always at least one item in the queue, and therefore at least one
+ // pending jobserver token request. If there *is* more concurrency
+ // available, we will immediately receive a token, which will upgrade
+ // the main thread's LLVM worker to a real one (conceptually), and free
+ // up the main thread to codegen if necessary. On the other hand, if
+ // there isn't more concurrency, then the main thread working on an LLVM
+ // item is appropriate, as long as the queue is full enough for demand.
+ //
+ // Speaking of which, how full should we keep the queue? Probably less
+ // full than you'd think. A lot has to go wrong for the queue not to be
+ // full enough and for that to have a negative effect on compile times.
+ //
+ // Workers are unlikely to finish at exactly the same time, so when one
+ // finishes and takes another work item off the queue, we often have
+ // ample time to codegen at that point before the next worker finishes.
+ // But suppose that codegen takes so long that the workers exhaust the
+ // queue, and we have one or more workers that have nothing to work on.
+ // Well, it might not be so bad. Of all the LLVM modules we create and
+ // optimize, one has to finish last. It's not necessarily the case that
+ // by losing some concurrency for a moment, we delay the point at which
+ // that last LLVM module is finished and the rest of compilation can
+ // proceed. Also, when we can't take advantage of some concurrency, we
+ // give tokens back to the job server. That enables some other rustc to
+ // potentially make use of the available concurrency. That could even
+ // *decrease* overall compile time if we're lucky. But yes, if no other
+ // rustc can make use of the concurrency, then we've squandered it.
+ //
+ // However, keeping the queue full is also beneficial when we have a
+ // surge in available concurrency. Then items can be taken from the
+ // queue immediately, without having to wait for codegen.
+ //
+ // So, the heuristic below tries to keep one item in the queue for every
+ // four running workers. Based on limited benchmarking, this appears to
+ // be more than sufficient to avoid increasing compilation times.
+ let quarter_of_workers = workers_running - 3 * workers_running / 4;
+ items_in_queue > 0 && items_in_queue >= quarter_of_workers
+ }
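+
+ // Worked example (illustrative): with 8 workers running the heuristic
+ // requires `8 - 3 * 8 / 4 = 2` queued items, and with 2 workers it
+ // requires `2 - 3 * 2 / 4 = 1`; in other words, roughly one queued item
+ // per four running workers, rounded up.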
+
+ fn maybe_start_llvm_timer<'a>(
+ prof: &'a SelfProfilerRef,
+ config: &ModuleConfig,
+ llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
+ ) {
+ if config.time_module && llvm_start_time.is_none() {
+ *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes", "crate"));
+ }
+ }
+}
+
+/// `FatalError` is explicitly not `Send`, so this sendable stand-in is used
+/// to report a fatal worker error back to the coordinator instead.
+#[must_use]
+pub struct WorkerFatalError;
+
+fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
+ B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
+ // Set up a destructor which will fire off a message that we're done as
+ // we exit.
+ struct Bomb<B: ExtraBackendMethods> {
+ coordinator_send: Sender<Box<dyn Any + Send>>,
+ result: Option<Result<WorkItemResult<B>, FatalError>>,
+ worker_id: usize,
+ }
+ impl<B: ExtraBackendMethods> Drop for Bomb<B> {
+ fn drop(&mut self) {
+ let worker_id = self.worker_id;
+ let msg = match self.result.take() {
+ Some(Ok(WorkItemResult::Compiled(m))) => {
+ Message::Done::<B> { result: Ok(m), worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsLink(m))) => {
+ Message::NeedsLink::<B> { module: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
+ Message::NeedsFatLTO::<B> { result: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
+ Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
+ }
+ Some(Err(FatalError)) => {
+ Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
+ }
+ None => Message::Done::<B> { result: Err(None), worker_id },
+ };
+ drop(self.coordinator_send.send(Box::new(msg)));
+ }
+ }
+
+ let mut bomb = Bomb::<B> {
+ coordinator_send: cgcx.coordinator_send.clone(),
+ result: None,
+ worker_id: cgcx.worker,
+ };
+
+ // Execute the work itself, and if it finishes successfully then flag
+ // ourselves as a success as well.
+ //
+ // Note that we ignore any `FatalError` coming out of `execute_work_item`,
+ // as a diagnostic was already sent off to the main thread - just
+ // surface that there was an error in this worker.
+ bomb.result = {
+ let _prof_timer = work.start_profiling(&cgcx);
+ Some(execute_work_item(&cgcx, work))
+ };
+ })
+ .expect("failed to spawn thread");
+}
+
+enum SharedEmitterMessage {
+ Diagnostic(Diagnostic),
+ InlineAsmError(u32, String, Level, Option<(String, Vec<InnerSpan>)>),
+ AbortIfErrors,
+ Fatal(String),
+}
+
+#[derive(Clone)]
+pub struct SharedEmitter {
+ sender: Sender<SharedEmitterMessage>,
+}
+
+pub struct SharedEmitterMain {
+ receiver: Receiver<SharedEmitterMessage>,
+}
+
+impl SharedEmitter {
+ pub fn new() -> (SharedEmitter, SharedEmitterMain) {
+ let (sender, receiver) = channel();
+
+ (SharedEmitter { sender }, SharedEmitterMain { receiver })
+ }
+
+ pub fn inline_asm_error(
+ &self,
+ cookie: u32,
+ msg: String,
+ level: Level,
+ source: Option<(String, Vec<InnerSpan>)>,
+ ) {
+ drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)));
+ }
+
+ pub fn fatal(&self, msg: &str) {
+ drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
+ }
+}
+
+impl Emitter for SharedEmitter {
+ fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
+ let fluent_args = self.to_fluent_args(diag.args());
+ drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+ msg: self.translate_messages(&diag.message, &fluent_args).to_string(),
+ code: diag.code.clone(),
+ lvl: diag.level(),
+ })));
+ for child in &diag.children {
+ drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+ msg: self.translate_messages(&child.message, &fluent_args).to_string(),
+ code: None,
+ lvl: child.level,
+ })));
+ }
+ drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
+ }
+
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ None
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<rustc_errors::FluentBundle>> {
+ None
+ }
+
+ fn fallback_fluent_bundle(&self) -> &rustc_errors::FluentBundle {
+ panic!("shared emitter attempted to translate a diagnostic");
+ }
+}
+
+impl SharedEmitterMain {
+ pub fn check(&self, sess: &Session, blocking: bool) {
+ loop {
+ let message = if blocking {
+ match self.receiver.recv() {
+ Ok(message) => Ok(message),
+ Err(_) => Err(()),
+ }
+ } else {
+ match self.receiver.try_recv() {
+ Ok(message) => Ok(message),
+ Err(_) => Err(()),
+ }
+ };
+
+ match message {
+ Ok(SharedEmitterMessage::Diagnostic(diag)) => {
+ let handler = sess.diagnostic();
+ let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
+ if let Some(code) = diag.code {
+ d.code(code);
+ }
+ handler.emit_diagnostic(&mut d);
+ }
+ Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => {
+ let msg = msg.strip_prefix("error: ").unwrap_or(&msg);
+
+ let mut err = match level {
+ Level::Error { lint: false } => sess.struct_err(msg).forget_guarantee(),
+ Level::Warning(_) => sess.struct_warn(msg),
+ Level::Note => sess.struct_note_without_error(msg),
+ _ => bug!("Invalid inline asm diagnostic level"),
+ };
+
+ // If the cookie is 0 then we don't have span information.
+ if cookie != 0 {
+ let pos = BytePos::from_u32(cookie);
+ let span = Span::with_root_ctxt(pos, pos);
+ err.set_span(span);
+ };
+
+ // Point to the generated assembly if it is available.
+ if let Some((buffer, spans)) = source {
+ let source = sess
+ .source_map()
+ .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
+ let source_span = Span::with_root_ctxt(source.start_pos, source.end_pos);
+ let spans: Vec<_> =
+ spans.iter().map(|sp| source_span.from_inner(*sp)).collect();
+ err.span_note(spans, "instantiated into assembly here");
+ }
+
+ err.emit();
+ }
+ Ok(SharedEmitterMessage::AbortIfErrors) => {
+ sess.abort_if_errors();
+ }
+ Ok(SharedEmitterMessage::Fatal(msg)) => {
+ sess.fatal(&msg);
+ }
+ Err(_) => {
+ break;
+ }
+ }
+ }
+ }
+}
+
+pub struct Coordinator<B: ExtraBackendMethods> {
+ pub sender: Sender<Box<dyn Any + Send>>,
+ future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
+ // Only used for the Message type.
+ phantom: PhantomData<B>,
+}
+
+impl<B: ExtraBackendMethods> Coordinator<B> {
+ fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
+ self.future.take().unwrap().join()
+ }
+}
+
+impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
+ fn drop(&mut self) {
+ if let Some(future) = self.future.take() {
+ // If we haven't joined yet, signal to the coordinator that it should spawn no more
+ // work, and wait for worker threads to finish.
+ drop(self.sender.send(Box::new(Message::CodegenAborted::<B>)));
+ drop(future.join());
+ }
+ }
+}
+
+pub struct OngoingCodegen<B: ExtraBackendMethods> {
+ pub backend: B,
+ pub metadata: EncodedMetadata,
+ pub metadata_module: Option<CompiledModule>,
+ pub crate_info: CrateInfo,
+ pub codegen_worker_receive: Receiver<Message<B>>,
+ pub shared_emitter_main: SharedEmitterMain,
+ pub output_filenames: Arc<OutputFilenames>,
+ pub coordinator: Coordinator<B>,
+}
+
+impl<B: ExtraBackendMethods> OngoingCodegen<B> {
+ pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+ let _timer = sess.timer("finish_ongoing_codegen");
+
+ self.shared_emitter_main.check(sess, true);
+ let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
+ Ok(Ok(compiled_modules)) => compiled_modules,
+ Ok(Err(())) => {
+ sess.abort_if_errors();
+ panic!("expected abort due to worker thread errors")
+ }
+ Err(_) => {
+ bug!("panic during codegen/LLVM phase");
+ }
+ });
+
+ sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());
+
+ sess.abort_if_errors();
+
+ let work_products =
+ copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
+ produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
+
+ // FIXME: time_llvm_passes support - does this use a global context or
+ // something?
+ if sess.codegen_units() == 1 && sess.time_llvm_passes() {
+ self.backend.print_pass_timings()
+ }
+
+ (
+ CodegenResults {
+ metadata: self.metadata,
+ crate_info: self.crate_info,
+
+ modules: compiled_modules.modules,
+ allocator_module: compiled_modules.allocator_module,
+ metadata_module: self.metadata_module,
+ },
+ work_products,
+ )
+ }
+
+ pub fn submit_pre_codegened_module_to_llvm(
+ &self,
+ tcx: TyCtxt<'_>,
+ module: ModuleCodegen<B::Module>,
+ ) {
+ self.wait_for_signal_to_codegen_item();
+ self.check_for_errors(tcx.sess);
+
+ // These are generally cheap and won't throw off scheduling.
+ let cost = 0;
+ submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost);
+ }
+
+ pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
+ self.wait_for_signal_to_codegen_item();
+ self.check_for_errors(tcx.sess);
+ drop(self.coordinator.sender.send(Box::new(Message::CodegenComplete::<B>)));
+ }
+
+ pub fn check_for_errors(&self, sess: &Session) {
+ self.shared_emitter_main.check(sess, false);
+ }
+
+ pub fn wait_for_signal_to_codegen_item(&self) {
+ match self.codegen_worker_receive.recv() {
+ Ok(Message::CodegenItem) => {
+ // Nothing to do
+ }
+ Ok(_) => panic!("unexpected message"),
+ Err(_) => {
+ // One of the LLVM threads must have panicked; fall through so
+ // error handling can be reached.
+ }
+ }
+ }
+}
+
+pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: ModuleCodegen<B::Module>,
+ cost: u64,
+) {
+ let llvm_work_item = WorkItem::Optimize(module);
+ drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
+}
+
+pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: CachedModuleCodegen,
+) {
+ let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
+ drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
+}
+
+pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tcx: TyCtxt<'_>,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: CachedModuleCodegen,
+) {
+ let filename = pre_lto_bitcode_filename(&module.name);
+ let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
+ let file = fs::File::open(&bc_path)
+ .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
+
+ let mmap = unsafe {
+ Mmap::map(file).unwrap_or_else(|e| {
+ panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
+ })
+ };
+ // Schedule the module to be loaded
+ drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
+ module_data: SerializedModule::FromUncompressedFile(mmap),
+ work_product: module.source,
+ })));
+}
+
+pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
+ format!("{}.{}", module_name, PRE_LTO_BC_EXT)
+}
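+
+// Illustrative: assuming `PRE_LTO_BC_EXT` is `"pre-lto.bc"`, a module named
+// `mycrate.abc123-cgu.0` is saved as `mycrate.abc123-cgu.0.pre-lto.bc`.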
+
+fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
+ // This combination should never be true (because it's not supported). If
+ // it is true, something is wrong with commandline arg validation.
+ assert!(
+ !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && tcx.sess.target.is_like_windows
+ && tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ tcx.sess.target.is_like_windows &&
+ tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
+ // ThinLTO can't handle this workaround in all cases, so we don't
+ // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
+ // dynamic linking when linker plugin LTO is enabled.
+ !tcx.sess.opts.cg.linker_plugin_lto.enabled()
+}
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
new file mode 100644
index 000000000..a840b2709
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -0,0 +1,961 @@
+use crate::back::metadata::create_compressed_metadata_file;
+use crate::back::write::{
+ compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
+ submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
+};
+use crate::common::{IntPredicate, RealPredicate, TypeKind};
+use crate::meth;
+use crate::mir;
+use crate::mir::operand::OperandValue;
+use crate::mir::place::PlaceRef;
+use crate::traits::*;
+use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
+
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+
+use rustc_data_structures::sync::par_iter;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sync::ParallelIterator;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::middle::lang_items;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
+use rustc_target::abi::{Align, VariantIdx};
+
+use std::collections::BTreeSet;
+use std::convert::TryFrom;
+use std::time::{Duration, Instant};
+
+use itertools::Itertools;
+
+pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
+ match op {
+ hir::BinOpKind::Eq => IntPredicate::IntEQ,
+ hir::BinOpKind::Ne => IntPredicate::IntNE,
+ hir::BinOpKind::Lt => {
+ if signed {
+ IntPredicate::IntSLT
+ } else {
+ IntPredicate::IntULT
+ }
+ }
+ hir::BinOpKind::Le => {
+ if signed {
+ IntPredicate::IntSLE
+ } else {
+ IntPredicate::IntULE
+ }
+ }
+ hir::BinOpKind::Gt => {
+ if signed {
+ IntPredicate::IntSGT
+ } else {
+ IntPredicate::IntUGT
+ }
+ }
+ hir::BinOpKind::Ge => {
+ if signed {
+ IntPredicate::IntSGE
+ } else {
+ IntPredicate::IntUGE
+ }
+ }
+ op => bug!(
+ "comparison_op_to_icmp_predicate: expected comparison operator, \
+ found {:?}",
+ op
+ ),
+ }
+}
+
+pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
+ match op {
+ hir::BinOpKind::Eq => RealPredicate::RealOEQ,
+ hir::BinOpKind::Ne => RealPredicate::RealUNE,
+ hir::BinOpKind::Lt => RealPredicate::RealOLT,
+ hir::BinOpKind::Le => RealPredicate::RealOLE,
+ hir::BinOpKind::Gt => RealPredicate::RealOGT,
+ hir::BinOpKind::Ge => RealPredicate::RealOGE,
+ op => {
+ bug!(
+ "comparison_op_to_fcmp_predicate: expected comparison operator, \
+ found {:?}",
+ op
+ );
+ }
+ }
+}
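+
+// Note the asymmetry above (a property of the IEEE 754 mapping): `Eq` uses the
+// *ordered* `RealOEQ` while `Ne` uses the *unordered* `RealUNE`, so with a NaN
+// operand `==` is false and `!=` is true.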
+
+pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ t: Ty<'tcx>,
+ ret_ty: Bx::Type,
+ op: hir::BinOpKind,
+) -> Bx::Value {
+ let signed = match t.kind() {
+ ty::Float(_) => {
+ let cmp = bin_op_to_fcmp_predicate(op);
+ let cmp = bx.fcmp(cmp, lhs, rhs);
+ return bx.sext(cmp, ret_ty);
+ }
+ ty::Uint(_) => false,
+ ty::Int(_) => true,
+ _ => bug!("compare_simd_types: invalid SIMD type"),
+ };
+
+ let cmp = bin_op_to_icmp_predicate(op, signed);
+ let cmp = bx.icmp(cmp, lhs, rhs);
+ // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
+ // to get the correctly sized type. This will compile to a single instruction
+ // once the IR is converted to assembly if the SIMD instruction is supported
+ // by the target architecture.
+ bx.sext(cmp, ret_ty)
+}
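+
+// Illustrative: comparing two `<4 x i32>` vectors yields a `<4 x i1>` mask in
+// LLVM; the `sext` widens it to the expected `<4 x i32>` return type, with
+// each lane either all-zeros (false) or all-ones (true, i.e. -1).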
+
+/// Retrieves the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
+/// where the new vtable for an object will be derived from the old one.
+pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> Bx::Value {
+ let cx = bx.cx();
+ let (source, target) =
+ cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
+ match (source.kind(), target.kind()) {
+ (&ty::Array(_, len), &ty::Slice(_)) => {
+ cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
+ }
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+ return old_info;
+ }
+
+ // trait upcasting coercion
+
+ let vptr_entry_idx =
+ cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let ptr_ty = cx.type_i8p();
+ let ptr_align = cx.tcx().data_layout.pointer_align.abi;
+ let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
+ let gep = bx.inbounds_gep(
+ ptr_ty,
+ llvtable,
+ &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
+ );
+ let new_vptr = bx.load(ptr_ty, gep, ptr_align);
+ bx.nonnull_metadata(new_vptr);
+ // VTable loads are invariant.
+ bx.set_invariant_load(new_vptr);
+ new_vptr
+ } else {
+ old_info
+ }
+ }
+ (_, &ty::Dynamic(ref data, ..)) => {
+ let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
+ cx.layout_of(cx.tcx().mk_mut_ptr(target)),
+ 1,
+ true,
+ );
+ cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
+ }
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
+pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: Bx::Value,
+ src_ty: Ty<'tcx>,
+ dst_ty: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
+ match (src_ty.kind(), dst_ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
+ let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
+ (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+ let src_layout = bx.cx().layout_of(src_ty);
+ let dst_layout = bx.cx().layout_of(dst_ty);
+ if src_ty == dst_ty {
+ return (src, old_info.unwrap());
+ }
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(bx.cx(), i);
+ if src_f.is_zst() {
+ continue;
+ }
+
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(bx.cx(), i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
+ }
+ let (lldata, llextra) = result.unwrap();
+ let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
+ let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+ }
+ _ => bug!("unsize_ptr: called on bad types"),
+ }
+}
+
+/// Coerces `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty`, and stores the result in `dst`.
+pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: PlaceRef<'tcx, Bx::Value>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+) {
+ let src_ty = src.layout.ty;
+ let dst_ty = dst.layout.ty;
+ match (src_ty.kind(), dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
+ let (base, info) = match bx.load_operand(src).val {
+ OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
+ OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
+ OperandValue::Ref(..) => bug!(),
+ };
+ OperandValue::Pair(base, info).store(bx, dst);
+ }
+
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
+ let src_f = src.project_field(bx, i);
+ let dst_f = dst.project_field(bx, i);
+
+ if dst_f.layout.is_zst() {
+ continue;
+ }
+
+ if src_f.layout.ty == dst_f.layout.ty {
+ memcpy_ty(
+ bx,
+ dst_f.llval,
+ dst_f.align,
+ src_f.llval,
+ src_f.align,
+ src_f.layout,
+ MemFlags::empty(),
+ );
+ } else {
+ coerce_unsized_into(bx, src_f, dst_f);
+ }
+ }
+ }
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
+ }
+}
+
+pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ op: hir::BinOpKind,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ cast_shift_rhs(bx, op, lhs, rhs)
+}
+
+fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ op: hir::BinOpKind,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ // Shifts may have any size int on the rhs
+ if op.is_shift() {
+ let mut rhs_llty = bx.cx().val_ty(rhs);
+ let mut lhs_llty = bx.cx().val_ty(lhs);
+ if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+ rhs_llty = bx.cx().element_type(rhs_llty)
+ }
+ if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+ lhs_llty = bx.cx().element_type(lhs_llty)
+ }
+ let rhs_sz = bx.cx().int_width(rhs_llty);
+ let lhs_sz = bx.cx().int_width(lhs_llty);
+ if lhs_sz < rhs_sz {
+ bx.trunc(rhs, lhs_llty)
+ } else if lhs_sz > rhs_sz {
+ // FIXME (#1877): If in the future shifting by negative
+ // values is no longer undefined then this is wrong.
+ bx.zext(rhs, lhs_llty)
+ } else {
+ rhs
+ }
+ } else {
+ rhs
+ }
+}
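+
+// Illustrative: for `x << y` with `x: i64` and `y: i32`, the rhs is
+// zero-extended to `i64`; with `x: i8` and `y: i32`, the rhs is truncated to
+// `i8`. Zero- rather than sign-extension is used because shift amounts are
+// not meaningfully negative (see the FIXME above).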
+
+/// Returns `true` if this session's target will use SEH-based unwinding.
+///
+/// This is only true for MSVC targets, and even then the 64-bit MSVC target
+/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
+/// 64-bit MinGW) instead of "full SEH".
+pub fn wants_msvc_seh(sess: &Session) -> bool {
+ sess.target.is_like_msvc
+}
+
+pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ dst: Bx::Value,
+ dst_align: Align,
+ src: Bx::Value,
+ src_align: Align,
+ layout: TyAndLayout<'tcx>,
+ flags: MemFlags,
+) {
+ let size = layout.size.bytes();
+ if size == 0 {
+ return;
+ }
+
+ bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
+}
+
+pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ instance: Instance<'tcx>,
+) {
+ // This is an `info!` to allow collecting monomorphization statistics
+ // and to allow finding the last function before LLVM aborts in
+ // release builds.
+ info!("codegen_instance({})", instance);
+
+ mir::codegen_mir::<Bx>(cx, instance);
+}
+
+/// Creates the `main` function which will initialize the Rust runtime and call
+/// the user's main function.
+pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+) -> Option<Bx::Function> {
+ let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
+ let main_is_local = main_def_id.is_local();
+ let instance = Instance::mono(cx.tcx(), main_def_id);
+
+ if main_is_local {
+ // We want to create the wrapper in the same codegen unit as Rust's main
+ // function.
+ if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+ return None;
+ }
+ } else if !cx.codegen_unit().is_primary() {
+ // We want to create the wrapper only when the codegen unit is the primary one
+ return None;
+ }
+
+ let main_llfn = cx.get_fn_addr(instance);
+
+ let use_start_lang_item = EntryFnType::Start != entry_type;
+ let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
+ return Some(entry_fn);
+
+ fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ rust_main: Bx::Value,
+ rust_main_def_id: DefId,
+ use_start_lang_item: bool,
+ ) -> Bx::Function {
+ // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
+ // depending on whether the target needs `argc` and `argv` to be passed in.
+ let llfty = if cx.sess().target.main_needs_argc_argv {
+ cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+ } else {
+ cx.type_func(&[], cx.type_int())
+ };
+
+ let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
+ // Since `main()` has no arguments, its return type cannot have
+ // late-bound regions: late-bound regions must appear in the
+ // argument listing.
+ let main_ret_ty = cx.tcx().normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let Some(llfn) = cx.declare_c_main(llfty) else {
+ // FIXME: We should be smart and show a better diagnostic here.
+ let span = cx.tcx().def_span(rust_main_def_id);
+ cx.sess()
+ .struct_span_err(span, "entry symbol `main` declared multiple times")
+ .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
+ .emit();
+ cx.sess().abort_if_errors();
+ bug!();
+ };
+
+ // `main` should respect the same config for frame pointer elimination as the rest of the code
+ cx.set_frame_pointer_type(llfn);
+ cx.apply_target_cpu_attr(llfn);
+
+ let llbb = Bx::append_block(&cx, llfn, "top");
+ let mut bx = Bx::build(&cx, llbb);
+
+ bx.insert_reference_to_gdb_debug_scripts_section_global();
+
+ let isize_ty = cx.type_isize();
+ let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+ let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
+
+ let (start_fn, start_ty, args) = if use_start_lang_item {
+ let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
+ let start_fn = cx.get_fn_addr(
+ ty::Instance::resolve(
+ cx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ start_def_id,
+ cx.tcx().intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap(),
+ );
+ let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
+ (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
+ } else {
+ debug!("using user-defined start fn");
+ let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ (rust_main, start_ty, vec![arg_argc, arg_argv])
+ };
+
+ let result = bx.call(start_ty, start_fn, &args, None);
+ let cast = bx.intcast(result, cx.type_int(), true);
+ bx.ret(cast);
+
+ llfn
+ }
+}
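+
+// Illustrative sketch (schematic names, not actual symbols): on a target with
+// `main_needs_argc_argv = true` and the `start` lang item in use, the wrapper
+// built above behaves roughly like the following, where `lang_start` is the
+// `start` lang item (std's `std::rt::lang_start`) and `user_main` is the
+// user's `fn main`:
+//
+// fn main(argc: c_int, argv: *const *const u8) -> c_int {
+// lang_start(user_main, argc as isize, argv) as c_int
+// }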
+
+/// Obtain the `argc` and `argv` values to pass to the Rust start function.
+fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ bx: &mut Bx,
+) -> (Bx::Value, Bx::Value) {
+ if cx.sess().target.main_needs_argc_argv {
+ // Params from the native `main()` are used as args for the Rust start function.
+ let param_argc = bx.get_param(0);
+ let param_argv = bx.get_param(1);
+ let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
+ let arg_argv = param_argv;
+ (arg_argc, arg_argv)
+ } else {
+ // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
+ let arg_argc = bx.const_int(cx.type_int(), 0);
+ let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+ (arg_argc, arg_argv)
+ }
+}
+
+/// This function returns all of the debugger visualizers specified for the
+/// current crate as well as all upstream crates transitively that match the
+/// `visualizer_type` specified.
+pub fn collect_debugger_visualizers_transitive(
+ tcx: TyCtxt<'_>,
+ visualizer_type: DebuggerVisualizerType,
+) -> BTreeSet<DebuggerVisualizerFile> {
+ tcx.debugger_visualizers(LOCAL_CRATE)
+ .iter()
+ .chain(
+ tcx.crates(())
+ .iter()
+ .filter(|&cnum| {
+ let used_crate_source = tcx.used_crate_source(*cnum);
+ used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
+ })
+ .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
+ )
+ .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
+ .cloned()
+ .collect::<BTreeSet<_>>()
+}
+
+pub fn codegen_crate<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ target_cpu: String,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> OngoingCodegen<B> {
+ // Skip crate items and just output metadata in -Z no-codegen mode.
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);
+
+ ongoing_codegen.codegen_finished(tcx);
+
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ return ongoing_codegen;
+ }
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+
+ // Run the monomorphization collector and partition the collected items into
+ // codegen units.
+ let codegen_units = tcx.collect_and_partition_mono_items(()).1;
+
+ // Force all codegen_unit queries so they are already either red or green
+ // when compile_codegen_unit accesses them. We are not able to re-execute
+ // the codegen_unit query from just the DepNode, so an unknown color would
+ // lead to having to re-execute compile_codegen_unit, possibly
+ // unnecessarily.
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in codegen_units {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let metadata_module = if need_metadata_module {
+ // Emit compressed metadata object.
+ let metadata_cgu_name =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
+ tcx.sess.time("write_compressed_metadata", || {
+ let file_name =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+ let data = create_compressed_metadata_file(
+ tcx.sess,
+ &metadata,
+ &exported_symbols::metadata_symbol_name(tcx),
+ );
+ if let Err(err) = std::fs::write(&file_name, data) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(file_name),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ })
+ } else {
+ None
+ };
+
+ let ongoing_codegen = start_async_codegen(
+ backend.clone(),
+ tcx,
+ target_cpu,
+ metadata,
+ metadata_module,
+ codegen_units.len(),
+ );
+
+ // Codegen an allocator shim, if necessary.
+ //
+ // If the crate doesn't have an `allocator_kind` set then there's definitely
+ // no shim to generate. Otherwise we also check our dependency graph for all
+ // our output crate types. If anything there looks like it's a `Dynamic`
+ // linkage, then it's already got an allocator shim and we'll be using that
+ // one instead. If nothing exists then it's our job to generate the
+ // allocator!
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ let allocator_module = if any_dynamic_crate {
+ None
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ let llmod_id =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
+ let module_llvm = tcx.sess.time("write_allocator_module", || {
+ backend.codegen_allocator(tcx, &llmod_id, kind, tcx.lang_items().oom().is_some())
+ });
+
+ Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
+ } else {
+ None
+ };
+
+ if let Some(allocator_module) = allocator_module {
+ ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
+ }
+
+ // For better throughput during parallel processing by LLVM, we used to sort
+ // CGUs largest to smallest. This would lead to better thread utilization
+ // by, for example, preventing a large CGU from being processed last and
+ // having only one LLVM thread working while the rest remained idle.
+ //
+ // However, this strategy would lead to high memory usage, as it meant the
+ // LLVM-IR for all of the largest CGUs would be resident in memory at once.
+ //
+ // Instead, we can compromise by ordering CGUs such that the largest and
+ // smallest are first, second largest and smallest are next, etc. If there
+ // are large size variations, this can reduce memory usage significantly.
+ let codegen_units: Vec<_> = {
+ let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
+ sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
+
+ let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
+ second_half.iter().rev().interleave(first_half).copied().collect()
+ };
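+
+ // For example: with size estimates [1, 2, 3, 4, 5, 6], the sorted halves are
+ // [1, 2, 3] and [4, 5, 6], and interleaving the reversed second half with the
+ // first yields the schedule [6, 1, 5, 2, 4, 3].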
+
+ // Calculate the CGU reuse
+ let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+ codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
+ });
+
+ let mut total_codegen_time = Duration::new(0, 0);
+ let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
+
+ // The non-parallel compiler can only translate codegen units to LLVM IR
+ // on a single thread, leading to a staircase effect where the N LLVM
+ // threads have to wait on the single codegen thread to generate work
+ // for them. The parallel compiler does not have this restriction, so
+ // we can pre-load the LLVM queue in parallel before handing off
+ // coordination to the OngoingCodegen scheduler.
+ //
+ // This is likely a temporary measure. Once we don't have to support the
+ // non-parallel compiler anymore, we can compile CGUs end-to-end in
+ // parallel and get rid of the complicated scheduling logic.
+ let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
+ tcx.sess.time("compile_first_CGU_batch", || {
+ // Try to find one CGU to compile per thread.
+ let cgus: Vec<_> = cgu_reuse
+ .iter()
+ .enumerate()
+ .filter(|&(_, reuse)| reuse == &CguReuse::No)
+ .take(tcx.sess.threads())
+ .collect();
+
+ // Compile the found CGUs in parallel.
+ let start_time = Instant::now();
+
+ let pre_compiled_cgus = par_iter(cgus)
+ .map(|(i, _)| {
+ let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
+ (i, module)
+ })
+ .collect();
+
+ total_codegen_time += start_time.elapsed();
+
+ pre_compiled_cgus
+ })
+ } else {
+ FxHashMap::default()
+ };
+
+ for (i, cgu) in codegen_units.iter().enumerate() {
+ ongoing_codegen.wait_for_signal_to_codegen_item();
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ let cgu_reuse = cgu_reuse[i];
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ CguReuse::No => {
+ let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+ cgu
+ } else {
+ let start_time = Instant::now();
+ let module = backend.compile_codegen_unit(tcx, cgu.name());
+ total_codegen_time += start_time.elapsed();
+ module
+ };
+ // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
+ // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
+ // compilation hang on post-monomorphization errors.
+ tcx.sess.abort_if_errors();
+
+ submit_codegened_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
+ module,
+ cost,
+ );
+ false
+ }
+ CguReuse::PreLto => {
+ submit_pre_lto_module_to_llvm(
+ &backend,
+ tcx,
+ &ongoing_codegen.coordinator.sender,
+ CachedModuleCodegen {
+ name: cgu.name().to_string(),
+ source: cgu.previous_work_product(tcx),
+ },
+ );
+ true
+ }
+ CguReuse::PostLto => {
+ submit_post_lto_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
+ CachedModuleCodegen {
+ name: cgu.name().to_string(),
+ source: cgu.previous_work_product(tcx),
+ },
+ );
+ true
+ }
+ };
+ }
+
+ ongoing_codegen.codegen_finished(tcx);
+
+ // Since the main thread is sometimes blocked during codegen, we keep track
+ // -Ztime-passes output manually.
+ if tcx.sess.time_passes() {
+ let end_rss = get_resident_set_size();
+
+ print_time_passes_entry(
+ "codegen_to_LLVM_IR",
+ total_codegen_time,
+ start_rss.unwrap(),
+ end_rss,
+ );
+ }
+
+ ongoing_codegen.check_for_errors(tcx.sess);
+ ongoing_codegen
+}
+
+impl CrateInfo {
+ pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
+ let exported_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
+ .collect();
+ let linked_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
+ .collect();
+ let local_crate_name = tcx.crate_name(LOCAL_CRATE);
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
+ let windows_subsystem = subsystem.map(|subsystem| {
+ if subsystem != sym::windows && subsystem != sym::console {
+ tcx.sess.fatal(&format!(
+ "invalid windows subsystem `{}`, only \
+ `windows` and `console` are allowed",
+ subsystem
+ ));
+ }
+ subsystem.to_string()
+ });
+
+ // This list is used when generating the command line to pass through to
+ // the system linker. The linker expects undefined symbols on the left of the
+ // command line to be defined in libraries on the right, not the other way
+ // around. For more info, see some comments in the add_used_library function
+ // below.
+ //
+ // In order to get this left-to-right dependency ordering, we use the reverse
+ // postorder of all crates putting the leaves at the right-most positions.
+ let used_crates = tcx
+ .postorder_cnums(())
+ .iter()
+ .rev()
+ .copied()
+ .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
+ .collect();
+
+ let mut info = CrateInfo {
+ target_cpu,
+ exported_symbols,
+ linked_symbols,
+ local_crate_name,
+ compiler_builtins: None,
+ profiler_runtime: None,
+ is_no_builtins: Default::default(),
+ native_libraries: Default::default(),
+ used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
+ crate_name: Default::default(),
+ used_crates,
+ used_crate_source: Default::default(),
+ lang_item_to_crate: Default::default(),
+ missing_lang_items: Default::default(),
+ dependency_formats: tcx.dependency_formats(()).clone(),
+ windows_subsystem,
+ natvis_debugger_visualizers: Default::default(),
+ };
+ let lang_items = tcx.lang_items();
+
+ let crates = tcx.crates(());
+
+ let n_crates = crates.len();
+ info.native_libraries.reserve(n_crates);
+ info.crate_name.reserve(n_crates);
+ info.used_crate_source.reserve(n_crates);
+ info.missing_lang_items.reserve(n_crates);
+
+ for &cnum in crates.iter() {
+ info.native_libraries
+ .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
+ info.crate_name.insert(cnum, tcx.crate_name(cnum));
+
+ let used_crate_source = tcx.used_crate_source(cnum);
+ info.used_crate_source.insert(cnum, used_crate_source.clone());
+ if tcx.is_compiler_builtins(cnum) {
+ info.compiler_builtins = Some(cnum);
+ }
+ if tcx.is_profiler_runtime(cnum) {
+ info.profiler_runtime = Some(cnum);
+ }
+ if tcx.is_no_builtins(cnum) {
+ info.is_no_builtins.insert(cnum);
+ }
+ let missing = tcx.missing_lang_items(cnum);
+ for &item in missing.iter() {
+ if let Ok(id) = lang_items.require(item) {
+ info.lang_item_to_crate.insert(item, id.krate);
+ }
+ }
+
+ // No need to look for lang items that don't actually need to exist.
+ let missing =
+ missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
+ info.missing_lang_items.insert(cnum, missing);
+ }
+
+ let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
+ // These are crate types for which we invoke the linker and can embed
+ // NatVis visualizers.
+ true
+ }
+ CrateType::ProcMacro => {
+ // We could embed NatVis for proc macro crates too (to improve the debugging
+ // experience for them) but it does not seem like a good default, since
+ // this is a rare use case and we don't want to slow down the common case.
+ false
+ }
+ CrateType::Staticlib | CrateType::Rlib => {
+ // We don't invoke the linker for these, so there is no need to collect NatVis visualizers for them.
+ false
+ }
+ });
+
+ if tcx.sess.target.is_like_msvc && embed_visualizers {
+ info.natvis_debugger_visualizers =
+ collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
+ }
+
+ info
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.backend_optimization_level = |tcx, cratenum| {
+ let for_speed = match tcx.sess.opts.optimize {
+ // If globally no optimization is done, #[optimize] has no effect.
+ //
+ // This is done because if we ended up "upgrading" to `-O2` here, we'd populate the
+ // pass manager and it is likely that some module-wide passes (such as the inliner or
+ // cross-function constant propagation) would ignore the `optnone` annotation we put
+ // on the functions, thus pulling these functions into optimizations anyway.
+ config::OptLevel::No => return config::OptLevel::No,
+ // If globally optimize-speed is already specified, just use that level.
+ config::OptLevel::Less => return config::OptLevel::Less,
+ config::OptLevel::Default => return config::OptLevel::Default,
+ config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
+ // If globally optimize-for-size has been requested, use -O2 instead for any
+ // function annotated with #[optimize(speed)].
+ config::OptLevel::Size => config::OptLevel::Default,
+ config::OptLevel::SizeMin => config::OptLevel::Default,
+ };
+
+ let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
+ for id in &*defids {
+ let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
+ match optimize {
+ attr::OptimizeAttr::None => continue,
+ attr::OptimizeAttr::Size => continue,
+ attr::OptimizeAttr::Speed => {
+ return for_speed;
+ }
+ }
+ }
+ tcx.sess.opts.optimize
+ };
+}
+
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+ // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) {
+ // We can re-use either the pre- or the post-thinlto state. If no LTO is
+ // being performed then we can use post-LTO artifacts, otherwise we must
+ // reuse pre-LTO artifacts
+ match compute_per_cgu_lto_type(
+ &tcx.sess.lto(),
+ &tcx.sess.opts,
+ &tcx.sess.crate_types(),
+ ModuleKind::Regular,
+ ) {
+ ComputedLtoType::No => CguReuse::PostLto,
+ _ => CguReuse::PreLto,
+ }
+ } else {
+ CguReuse::No
+ }
+}
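+
+// For example: in an incremental build without LTO there is only one optimized
+// artifact per CGU, so a green CGU is reused post-LTO (i.e. as-is); under
+// (Thin)LTO a green CGU can only be reused pre-LTO, since the LTO step may
+// combine it with CGUs that did change.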
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
new file mode 100644
index 000000000..8ca1a6084
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -0,0 +1,223 @@
+#![allow(non_camel_case_types)]
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::LangItem;
+use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::Span;
+
+use crate::base;
+use crate::traits::*;
+
+#[derive(Copy, Clone)]
+pub enum IntPredicate {
+ IntEQ,
+ IntNE,
+ IntUGT,
+ IntUGE,
+ IntULT,
+ IntULE,
+ IntSGT,
+ IntSGE,
+ IntSLT,
+ IntSLE,
+}
+
+#[derive(Copy, Clone)]
+pub enum RealPredicate {
+ RealPredicateFalse,
+ RealOEQ,
+ RealOGT,
+ RealOGE,
+ RealOLT,
+ RealOLE,
+ RealONE,
+ RealORD,
+ RealUNO,
+ RealUEQ,
+ RealUGT,
+ RealUGE,
+ RealULT,
+ RealULE,
+ RealUNE,
+ RealPredicateTrue,
+}
+
+#[derive(Copy, Clone)]
+pub enum AtomicRmwBinOp {
+ AtomicXchg,
+ AtomicAdd,
+ AtomicSub,
+ AtomicAnd,
+ AtomicNand,
+ AtomicOr,
+ AtomicXor,
+ AtomicMax,
+ AtomicMin,
+ AtomicUMax,
+ AtomicUMin,
+}
+
+#[derive(Copy, Clone)]
+pub enum AtomicOrdering {
+ Unordered,
+ Relaxed,
+ Acquire,
+ Release,
+ AcquireRelease,
+ SequentiallyConsistent,
+}
+
+#[derive(Copy, Clone)]
+pub enum SynchronizationScope {
+ SingleThread,
+ CrossThread,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum TypeKind {
+ Void,
+ Half,
+ Float,
+ Double,
+ X86_FP80,
+ FP128,
+ PPC_FP128,
+ Label,
+ Integer,
+ Function,
+ Struct,
+ Array,
+ Pointer,
+ Vector,
+ Metadata,
+ X86_MMX,
+ Token,
+ ScalableVector,
+ BFloat,
+ X86_AMX,
+}
+
+// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement
+// the HashStable trait. Normally DepGraph::with_task() calls are
+// hidden behind queries, but CGU creation is a special case in two
+// ways: (1) it's not a query and (2) CGUs are output nodes, so their
+// Fingerprints are not actually needed. It remains to be clarified
+// how exactly this case will be handled in the red/green system but
+// for now we content ourselves with providing a no-op HashStable
+// implementation for CGUs.
+mod temp_stable_hash_impls {
+ use crate::ModuleCodegen;
+ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+ impl<HCX, M> HashStable<HCX> for ModuleCodegen<M> {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+ }
+}
+
+pub fn build_langcall<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &Bx,
+ span: Option<Span>,
+ li: LangItem,
+) -> (Bx::FnAbiOfResult, Bx::Value) {
+ let tcx = bx.tcx();
+ let def_id = tcx.require_lang_item(li, span);
+ let instance = ty::Instance::mono(tcx, def_id);
+ (bx.fn_abi_of_instance(instance, ty::List::empty()), bx.get_fn_addr(instance))
+}
+
+// To avoid UB from LLVM, these two functions mask RHS with an
+// appropriate mask unconditionally (i.e., the fallback behavior for
+// all shifts). For 32- and 64-bit types, this matches the semantics
+// of Java. (See related discussion on #1877 and #10183.)
+
+pub fn build_unchecked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
+ // #1877, #10183: Ensure that input is always valid
+ let rhs = shift_mask_rhs(bx, rhs);
+ bx.shl(lhs, rhs)
+}
+
+pub fn build_unchecked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs_t: Ty<'tcx>,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
+ // #1877, #10183: Ensure that input is always valid
+ let rhs = shift_mask_rhs(bx, rhs);
+ let is_signed = lhs_t.is_signed();
+ if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
+}
+
+fn shift_mask_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs_llty = bx.val_ty(rhs);
+ let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
+ bx.and(rhs, shift_val)
+}
+
+pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ llty: Bx::Type,
+ mask_llty: Bx::Type,
+ invert: bool,
+) -> Bx::Value {
+ let kind = bx.type_kind(llty);
+ match kind {
+ TypeKind::Integer => {
+ // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
+ let val = bx.int_width(llty) - 1;
+ if invert {
+ bx.const_int(mask_llty, !val as i64)
+ } else {
+ bx.const_uint(mask_llty, val)
+ }
+ }
+ TypeKind::Vector => {
+ let mask =
+ shift_mask_val(bx, bx.element_type(llty), bx.element_type(mask_llty), invert);
+ bx.vector_splat(bx.vector_length(mask_llty), mask)
+ }
+ _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
+ }
+}
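+
+// Illustration: for a 32-bit shift the mask is 31, so `x << 33` is emitted as
+// `x << (33 & 31)`, i.e. `x << 1` -- the Java-style semantics mentioned above,
+// and never a shift amount >= the bit width (which LLVM treats as UB). For
+// vector shifts, the same per-element mask is splatted across all lanes.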
+
+pub fn span_invalid_monomorphization_error(sess: &Session, span: Span, msg: &str) {
+ struct_span_err!(sess, span, E0511, "{}", msg).emit();
+}
+
+pub fn asm_const_to_str<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sp: Span,
+ const_value: ConstValue<'tcx>,
+ ty_and_layout: TyAndLayout<'tcx>,
+) -> String {
+ let ConstValue::Scalar(scalar) = const_value else {
+ span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
+ };
+ let value = scalar.assert_bits(ty_and_layout.size);
+ match ty_and_layout.ty.kind() {
+ ty::Uint(_) => value.to_string(),
+ ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) {
+ ty::IntTy::I8 => (value as i8).to_string(),
+ ty::IntTy::I16 => (value as i16).to_string(),
+ ty::IntTy::I32 => (value as i32).to_string(),
+ ty::IntTy::I64 => (value as i64).to_string(),
+ ty::IntTy::I128 => (value as i128).to_string(),
+ ty::IntTy::Isize => unreachable!(),
+ },
+ _ => span_bug!(sp, "asm const has bad type {}", ty_and_layout.ty),
+ }
+}
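+
+// For example: a promoted `-1i8` asm const is stored as the bit pattern 0xff,
+// so `assert_bits` yields 255 and `(value as i8).to_string()` recovers "-1",
+// while a `u8` const with the same bits would be rendered as "255".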
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
new file mode 100644
index 000000000..e288760a0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
@@ -0,0 +1,85 @@
+use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+
+/// Aligns with [llvm::coverage::Counter::CounterKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L95)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+ Zero = 0,
+ CounterValueReference = 1,
+ Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+/// * For `CounterKind::Zero`, `id` is assumed to be `0`
+/// * For `CounterKind::CounterValueReference`, `id` matches the `counter_id` of the injected
+/// instrumentation counter (the `index` argument to the LLVM intrinsic
+/// `instrprof.increment()`)
+/// * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+/// counter expressions.
+///
+/// Aligns with [llvm::coverage::Counter](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L102-L103).
+///
+/// Important: The Rust struct layout (order and types of fields) must match its C++ counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+ // Important: The layout (order and types of fields) must match its C++ counterpart.
+ pub kind: CounterKind,
+ id: u32,
+}
+
+impl Counter {
+ /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
+ /// `id` is not used.
+ pub fn zero() -> Self {
+ Self { kind: CounterKind::Zero, id: 0 }
+ }
+
+ /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
+ /// the given 1-based counter_id to the required 0-based equivalent for
+ /// the `Counter` encoding.
+ pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
+ Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+ }
+
+ /// Constructs a new `Counter` of kind `Expression`.
+ pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
+ Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+ }
+
+ /// Returns true if the `Counter` kind is `Zero`.
+ pub fn is_zero(&self) -> bool {
+ matches!(self.kind, CounterKind::Zero)
+ }
+
+ /// An explicitly-named function to get the ID value, making it more obvious
+ /// that the stored value is now 0-based.
+ pub fn zero_based_id(&self) -> u32 {
+ debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
+ self.id
+ }
+}
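+
+// Illustration: MIR-side counter IDs are 1-based, while the LLVM encoding is
+// 0-based, so `Counter::counter_value_reference` maps MIR counter #1 to
+// `Counter { kind: CounterKind::CounterValueReference, id: 0 }`.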
+
+/// Aligns with [llvm::coverage::CounterExpression::ExprKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L150)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+ Subtract = 0,
+ Add = 1,
+}
+
+/// Aligns with [llvm::coverage::CounterExpression](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L151-L152)
+/// Important: The Rust struct layout (order and types of fields) must match its C++
+/// counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+ pub kind: ExprKind,
+ pub lhs: Counter,
+ pub rhs: Counter,
+}
+
+impl CounterExpression {
+ pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+ Self { kind, lhs, rhs }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
new file mode 100644
index 000000000..1a6495cb1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -0,0 +1,347 @@
+pub use super::ffi::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
+ InjectedExpressionIndex, MappedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Expression {
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (additions or subtractions), and (c) unreachable regions (always counted as zero),
+/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
+/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+#[derive(Debug)]
+pub struct FunctionCoverage<'tcx> {
+ instance: Instance<'tcx>,
+ source_hash: u64,
+ is_used: bool,
+ counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
+ expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+ unreachable_regions: Vec<CodeRegion>,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+ /// Creates a new set of coverage data for a used (called) function.
+ pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, true)
+ }
+
+ /// Creates a new set of coverage data for an unused (never called) function.
+ pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, false)
+ }
+
+ fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
+ let coverageinfo = tcx.coverageinfo(instance.def);
+ debug!(
+ "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
+ instance, coverageinfo, is_used
+ );
+ Self {
+ instance,
+ source_hash: 0, // will be set with the first `add_counter()`
+ is_used,
+ counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+ expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+ unreachable_regions: Vec::new(),
+ }
+ }
+
+ /// Returns true for a used (called) function, and false for an unused function.
+ pub fn is_used(&self) -> bool {
+ self.is_used
+ }
+
+ /// Sets the function source hash value. If called multiple times for the same function, all
+ /// calls should have the same hash value.
+ pub fn set_function_source_hash(&mut self, source_hash: u64) {
+ if self.source_hash == 0 {
+ self.source_hash = source_hash;
+ } else {
+ debug_assert_eq!(source_hash, self.source_hash);
+ }
+ }
+
+ /// Adds a code region to be counted by an injected counter intrinsic.
+ pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
+ if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+ assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ }
+ }
+
+ /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+ /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
+ /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
+ /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
+ /// knowing what the last counter ID will be.
+ ///
+ /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
+ /// struct, its vector index is computed, from the given expression ID, by subtracting from
+ /// `u32::MAX`.
+ ///
+ /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
+ /// expressions, an operand that references an expression also uses its original ID, descending
+ /// from `u32::MAX`. These operands are translated only during code generation, after all
+ /// counters and expressions have been added.
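+ ///
+ /// For example, an expression with ID `u32::MAX` is stored at `expressions[0]`,
+ /// one with ID `u32::MAX - 1` at `expressions[1]`, and so on.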
+ pub fn add_counter_expression(
+ &mut self,
+ expression_id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) {
+ debug!(
+ "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
+ expression_id, lhs, op, rhs, region
+ );
+ let expression_index = self.expression_index(u32::from(expression_id));
+ debug_assert!(
+ expression_index.as_usize() < self.expressions.len(),
+ "expression_index {} is out of range for expressions.len() = {}
+ for {:?}",
+ expression_index.as_usize(),
+ self.expressions.len(),
+ self,
+ );
+ if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+ lhs,
+ op,
+ rhs,
+ region: region.clone(),
+ }) {
+ assert_eq!(
+ previous_expression,
+ Expression { lhs, op, rhs, region },
+ "add_counter_expression: expression for id changed"
+ );
+ }
+ }
+
+ /// Add a region that will be marked as "unreachable", with a constant "zero counter".
+ pub fn add_unreachable_region(&mut self, region: CodeRegion) {
+ self.unreachable_regions.push(region)
+ }
+
+ /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+ /// or not the source code structure changed between different compilations.
+ pub fn source_hash(&self) -> u64 {
+ self.source_hash
+ }
+
+ /// Generate an array of `CounterExpression`s, and an iterator over all `Counter`s and their
+ /// associated `CodeRegion`s (from which the LLVM-specific `CoverageMapGenerator` will create
+ /// `CounterMappingRegion`s).
+ pub fn get_expressions_and_counter_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ assert!(
+ self.source_hash != 0 || !self.is_used,
+ "No counters provided the source_hash for used function: {:?}",
+ self.instance
+ );
+
+ let counter_regions = self.counter_regions();
+ let (counter_expressions, expression_regions) = self.expressions_with_regions();
+ let unreachable_regions = self.unreachable_regions();
+
+ let counter_regions =
+ counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+ (counter_expressions, counter_regions)
+ }
+
+ fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.counters.iter_enumerated().filter_map(|(index, entry)| {
+ // Option::map() will return None to filter out missing counters. This may happen
+ // if, for example, a MIR-instrumented counter is removed during an optimization.
+ entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
+ })
+ }
+
+ fn expressions_with_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ let mut counter_expressions = Vec::with_capacity(self.expressions.len());
+ let mut expression_regions = Vec::with_capacity(self.expressions.len());
+ let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+
+ // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
+ // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
+ // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
+ // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
+ // matches the injected counter index); and any other value is converted into a
+ // `CounterKind::Expression` with the expression's `new_index`.
+ //
+ // Expressions will be returned from this function in a sequential vector (array) of
+ // `CounterExpression`, so the expression IDs must be mapped from their original,
+ // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+ //
+ // An `Expression` as an operand will have already been encountered as an `Expression` with
+ // operands, so its new_index will already have been generated (as a 1-up index value).
+ // (If an `Expression` as an operand does not have a corresponding new_index, it was
+ // probably optimized out, after the expression was injected into the MIR, so it will
+ // get a `CounterKind::Zero` instead.)
+ //
+ // In other words, an `Expression` at any given index can include other expressions as
+ // operands, but expression operands can only come from the subset of expressions having
+ // `expression_index`s lower than the referencing `Expression`. Therefore, it is
+ // reasonable to look up the new index of an expression operand while the `new_indexes`
+ // vector is only complete up to the current `ExpressionIndex`.
+ let id_to_counter = |new_indexes: &IndexVec<
+ InjectedExpressionIndex,
+ Option<MappedExpressionIndex>,
+ >,
+ id: ExpressionOperandId| {
+ if id == ExpressionOperandId::ZERO {
+ Some(Counter::zero())
+ } else if id.index() < self.counters.len() {
+ debug_assert!(
+ id.index() > 0,
+ "ExpressionOperandId indexes for counters are 1-based, but this id={}",
+ id.index()
+ );
+ // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
+ // and may not have their own `CodeRegion`s.
+ let index = CounterValueReference::from(id.index());
+ // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
+ Some(Counter::counter_value_reference(index))
+ } else {
+ let index = self.expression_index(u32::from(id));
+ self.expressions
+ .get(index)
+ .expect("expression id is out of range")
+ .as_ref()
+ // If an expression was optimized out, assume it would have produced a count
+ // of zero. This ensures that expressions dependent on optimized-out
+ // expressions are still valid.
+ .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
+ }
+ };
+
+ for (original_index, expression) in
+ self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
+ // Option::map() will return None to filter out missing expressions. This may happen
+ // if, for example, a MIR-instrumented expression is removed during an optimization.
+ entry.as_ref().map(|expression| (original_index, expression))
+ })
+ {
+ let optional_region = &expression.region;
+ let Expression { lhs, op, rhs, .. } = *expression;
+
+ if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
+ .map(|lhs_counter| {
+ id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
+ })
+ {
+ if lhs_counter.is_zero() && op.is_subtract() {
+ // The left side of a subtraction was probably optimized out. As an example,
+ // a branch condition might be evaluated as a constant expression, and the
+ // branch could be removed, dropping unused counters in the process.
+ //
+ // Since counters are unsigned, we must assume the result of the expression
+ // can be no more and no less than zero. An expression known to evaluate to zero
+ // does not need to be added to the coverage map.
+ //
+ // Coverage test `loops_branches.rs` includes multiple variations of branches
+ // based on constant conditional (literal `true` or `false`), and demonstrates
+ // that the expected counts are still correct.
+ debug!(
+ "Expression subtracts from zero (assume unreachable): \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index, lhs, op, rhs, optional_region,
+ );
+ rhs_counter = Counter::zero();
+ }
+ debug_assert!(
+ lhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((lhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "lhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ lhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ debug_assert!(
+ rhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((rhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "rhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ rhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ // Both operands exist. `Expression` operands exist in `self.expressions` and have
+ // been assigned a `new_index`.
+ let mapped_expression_index =
+ MappedExpressionIndex::from(counter_expressions.len());
+ let expression = CounterExpression::new(
+ lhs_counter,
+ match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ rhs_counter,
+ );
+ debug!(
+ "Adding expression {:?} = {:?}, region: {:?}",
+ mapped_expression_index, expression, optional_region
+ );
+ counter_expressions.push(expression);
+ new_indexes[original_index] = Some(mapped_expression_index);
+ if let Some(region) = optional_region {
+ expression_regions.push((Counter::expression(mapped_expression_index), region));
+ }
+ } else {
+ bug!(
+ "expression has one or more missing operands \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index,
+ lhs,
+ op,
+ rhs,
+ optional_region,
+ );
+ }
+ }
+ (counter_expressions, expression_regions.into_iter())
+ }
+
+ fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+ }
+
+ fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
+ debug_assert!(id_descending_from_max >= self.counters.len() as u32);
+ InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
new file mode 100644
index 000000000..569fd3f1a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
@@ -0,0 +1,2 @@
+pub mod ffi;
+pub mod map;
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
new file mode 100644
index 000000000..6e3f4f0b8
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
@@ -0,0 +1,34 @@
+use rustc_middle::ty::{self, layout::TyAndLayout};
+use rustc_target::abi::Size;
+
+// FIXME(eddyb) find a place for this (or a way to replace it).
+pub mod type_names;
+
+/// Returns true if we want to generate a DW_TAG_enumeration_type description for
+/// this instead of a DW_TAG_struct_type with DW_TAG_variant_part.
+///
+/// NOTE: This is somewhat inconsistent right now: For empty enums and enums with a single
+/// fieldless variant, we generate DW_TAG_struct_type, although a
+/// DW_TAG_enumeration_type would be a better fit.
+pub fn wants_c_like_enum_debuginfo<'tcx>(enum_type_and_layout: TyAndLayout<'tcx>) -> bool {
+ match enum_type_and_layout.ty.kind() {
+ ty::Adt(adt_def, _) => {
+ if !adt_def.is_enum() {
+ return false;
+ }
+
+ match adt_def.variants().len() {
+ 0 => false,
+ 1 => {
+ // Univariant enums count as C-like if they are non-zero-sized and have no fields
+ enum_type_and_layout.size != Size::ZERO && adt_def.all_fields().count() == 0
+ }
+ _ => {
+ // Enums with more than one variant count as C-like if none of their variants have fields
+ adt_def.all_fields().count() == 0
+ }
+ }
+ }
+ _ => false,
+ }
+}
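+
+// For example: `enum Ordering { Less, Equal, Greater }` has multiple variants
+// and no fields anywhere, so it gets the C-like DW_TAG_enumeration_type
+// encoding, while `Option<u32>` has a field in `Some` and therefore keeps the
+// DW_TAG_struct_type + DW_TAG_variant_part encoding.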
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
new file mode 100644
index 000000000..8cd5a0fc2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -0,0 +1,821 @@
+// Type Names for Debug Info.
+
+// Notes on targeting MSVC:
+// In general, MSVC's debugger attempts to parse all arguments as C++ expressions,
+// even if the argument is explicitly a symbol name.
+// As such, there are many things that cause parsing issues:
+// * `#` is treated as a special character for macros.
+// * `{` or `<` at the beginning of a name is treated as an operator.
+// * `>>` is always treated as a right-shift.
+// * `[` in a name is treated like a regex bracket expression (match any char
+// within the brackets).
+// * `"` is treated as the start of a string.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::DefId;
+use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
+use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
+use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, ExistentialProjection, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::{Integer, TagEncoding, Variants};
+use smallvec::SmallVec;
+
+use std::borrow::Cow;
+use std::fmt::Write;
+
+use crate::debuginfo::wants_c_like_enum_debuginfo;
+
+// Compute the name of the type as it should be stored in debuginfo. Does not do
+// any caching, i.e., calling the function twice with the same type will also do
+// the work twice. The `qualified` parameter only affects the first level of the
+// type name, further levels (i.e., type parameters) are always fully qualified.
+pub fn compute_debuginfo_type_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ qualified: bool,
+) -> String {
+ let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
+
+ let mut result = String::with_capacity(64);
+ let mut visited = FxHashSet::default();
+ push_debuginfo_type_name(tcx, t, qualified, &mut result, &mut visited);
+ result
+}
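+
+// For example (illustrative output): for the tuple type `(u32, bool)` this
+// produces roughly "(u32, bool)" on most targets, but "tuple$<u32,bool>" when
+// targeting MSVC, for compatibility with natvis (see the notes on MSVC name
+// parsing at the top of this file).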
+
+// Pushes the name of the type as it should be stored in debuginfo on the
+// `output` String. See also compute_debuginfo_type_name().
+fn push_debuginfo_type_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ qualified: bool,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+ // When targeting MSVC, emit C++ style type names for compatibility with
+ // .natvis visualizers (and perhaps other existing native debuggers?)
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ match *t.kind() {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => {
+ if cpp_like_debuginfo {
+ output.push_str("never$");
+ } else {
+ output.push('!');
+ }
+ }
+ ty::Int(int_ty) => output.push_str(int_ty.name_str()),
+ ty::Uint(uint_ty) => output.push_str(uint_ty.name_str()),
+ ty::Float(float_ty) => output.push_str(float_ty.name_str()),
+ ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
+ ty::Adt(def, substs) => {
+ // `layout_for_cpp_like_fallback` will be `Some` if we want to use the fallback encoding.
+ let layout_for_cpp_like_fallback = if cpp_like_debuginfo && def.is_enum() {
+ match tcx.layout_of(ParamEnv::reveal_all().and(t)) {
+ Ok(layout) => {
+ if !wants_c_like_enum_debuginfo(layout) {
+ Some(layout)
+ } else {
+ // This is a C-like enum so we don't want to use the fallback encoding
+ // for the name.
+ None
+ }
+ }
+ Err(e) => {
+ // Computing the layout can still fail here, e.g. if the target architecture
+ // cannot represent the type. See https://github.com/rust-lang/rust/issues/94961.
+ tcx.sess.fatal(&format!("{}", e));
+ }
+ }
+ } else {
+ // We are not emitting cpp-like debuginfo or this isn't even an enum.
+ None
+ };
+
+ if let Some(ty_and_layout) = layout_for_cpp_like_fallback {
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_item_name(tcx, def.did(), true, output);
+ push_generic_params_internal(tcx, substs, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_item_name(tcx, def.did(), qualified, output);
+ push_generic_params_internal(tcx, substs, output, visited);
+ }
+ }
+ ty::Tuple(component_types) => {
+ if cpp_like_debuginfo {
+ output.push_str("tuple$<");
+ } else {
+ output.push('(');
+ }
+
+ for component_type in component_types {
+ push_debuginfo_type_name(tcx, component_type, true, output, visited);
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ if !component_types.is_empty() {
+ pop_arg_separator(output);
+ }
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ output.push(')');
+ }
+ }
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl }) => {
+ if cpp_like_debuginfo {
+ match mutbl {
+ Mutability::Not => output.push_str("ptr_const$<"),
+ Mutability::Mut => output.push_str("ptr_mut$<"),
+ }
+ } else {
+ output.push('*');
+ match mutbl {
+ Mutability::Not => output.push_str("const "),
+ Mutability::Mut => output.push_str("mut "),
+ }
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, qualified, output, visited);
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+ }
+ ty::Ref(_, inner_type, mutbl) => {
+ // Slices and `&str` are treated like C++ pointers when computing debug
+ // info for the MSVC debugger. However, wrapping these types' names in a synthetic type
+ // causes the .natvis engine for WinDbg to fail to display their data, so we opt these
+ // types out to aid debugging in MSVC.
+ let is_slice_or_str = matches!(*inner_type.kind(), ty::Slice(_) | ty::Str);
+
+ if !cpp_like_debuginfo {
+ output.push('&');
+ output.push_str(mutbl.prefix_str());
+ } else if !is_slice_or_str {
+ match mutbl {
+ Mutability::Not => output.push_str("ref$<"),
+ Mutability::Mut => output.push_str("ref_mut$<"),
+ }
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, qualified, output, visited);
+
+ if cpp_like_debuginfo && !is_slice_or_str {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+ }
+ ty::Array(inner_type, len) => {
+ if cpp_like_debuginfo {
+ output.push_str("array$<");
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+ match len.kind() {
+ ty::ConstKind::Param(param) => write!(output, ",{}>", param.name).unwrap(),
+ _ => write!(output, ",{}>", len.eval_usize(tcx, ty::ParamEnv::reveal_all()))
+ .unwrap(),
+ }
+ } else {
+ output.push('[');
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+ match len.kind() {
+ ty::ConstKind::Param(param) => write!(output, "; {}]", param.name).unwrap(),
+ _ => write!(output, "; {}]", len.eval_usize(tcx, ty::ParamEnv::reveal_all()))
+ .unwrap(),
+ }
+ }
+ }
+ ty::Slice(inner_type) => {
+ if cpp_like_debuginfo {
+ output.push_str("slice$<");
+ } else {
+ output.push('[');
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ output.push(']');
+ }
+ }
+ ty::Dynamic(ref trait_data, ..) => {
+ let auto_traits: SmallVec<[DefId; 4]> = trait_data.auto_traits().collect();
+
+ let has_enclosing_parens = if cpp_like_debuginfo {
+ output.push_str("dyn$<");
+ false
+ } else {
+ if trait_data.len() > 1 && auto_traits.len() != 0 {
+ // We need enclosing parens because there is more than one trait
+ output.push_str("(dyn ");
+ true
+ } else {
+ output.push_str("dyn ");
+ false
+ }
+ };
+
+ if let Some(principal) = trait_data.principal() {
+ let principal =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
+ push_item_name(tcx, principal.def_id, qualified, output);
+ let principal_has_generic_params =
+ push_generic_params_internal(tcx, principal.substs, output, visited);
+
+ let projection_bounds: SmallVec<[_; 4]> = trait_data
+ .projection_bounds()
+ .map(|bound| {
+ let ExistentialProjection { item_def_id, term, .. } =
+ tcx.erase_late_bound_regions(bound);
+ // FIXME(associated_const_equality): allow for consts here
+ (item_def_id, term.ty().unwrap())
+ })
+ .collect();
+
+ if projection_bounds.len() != 0 {
+ if principal_has_generic_params {
+ // push_generic_params_internal() above added a `>` but we actually
+ // want to add more items to that list, so remove that again...
+ pop_close_angle_bracket(output);
+ // .. and add a comma to separate the regular generic args from the
+ // associated types.
+ push_arg_separator(cpp_like_debuginfo, output);
+ } else {
+ // push_generic_params_internal() did not add `<...>`, so we open
+ // angle brackets here.
+ output.push('<');
+ }
+
+ for (item_def_id, ty) in projection_bounds {
+ if cpp_like_debuginfo {
+ output.push_str("assoc$<");
+ push_item_name(tcx, item_def_id, false, output);
+ push_arg_separator(cpp_like_debuginfo, output);
+ push_debuginfo_type_name(tcx, ty, true, output, visited);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ push_item_name(tcx, item_def_id, false, output);
+ output.push('=');
+ push_debuginfo_type_name(tcx, ty, true, output, visited);
+ }
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+
+ pop_arg_separator(output);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+
+ if auto_traits.len() != 0 {
+ push_auto_trait_separator(cpp_like_debuginfo, output);
+ }
+ }
+
+ if auto_traits.len() != 0 {
+ let mut auto_traits: SmallVec<[String; 4]> = auto_traits
+ .into_iter()
+ .map(|def_id| {
+ let mut name = String::with_capacity(20);
+ push_item_name(tcx, def_id, true, &mut name);
+ name
+ })
+ .collect();
+ auto_traits.sort_unstable();
+
+ for auto_trait in auto_traits {
+ output.push_str(&auto_trait);
+ push_auto_trait_separator(cpp_like_debuginfo, output);
+ }
+
+ pop_auto_trait_separator(output);
+ }
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else if has_enclosing_parens {
+ output.push(')');
+ }
+ }
+ ty::FnDef(..) | ty::FnPtr(_) => {
+ // We've encountered a weird 'recursive type'.
+ // Currently, the only way to generate such a type
+ // is by using 'impl trait':
+ //
+ // fn foo() -> impl Copy { foo }
+ //
+ // There's not really a sensible name we can generate,
+ // since we don't include 'impl trait' types (e.g. ty::Opaque)
+ // in the output.
+ //
+ // Since we need to generate *something*, we just
+ // use a dummy string that should make it clear
+ // that something unusual is going on.
+ if !visited.insert(t) {
+ output.push_str(if cpp_like_debuginfo {
+ "recursive_type$"
+ } else {
+ "<recursive_type>"
+ });
+ return;
+ }
+
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), t.fn_sig(tcx));
+
+ if cpp_like_debuginfo {
+ // Format as a C++ function pointer: return_type (*)(params...)
+ if sig.output().is_unit() {
+ output.push_str("void");
+ } else {
+ push_debuginfo_type_name(tcx, sig.output(), true, output, visited);
+ }
+ output.push_str(" (*)(");
+ } else {
+ output.push_str(sig.unsafety.prefix_str());
+
+ if sig.abi != rustc_target::spec::abi::Abi::Rust {
+ output.push_str("extern \"");
+ output.push_str(sig.abi.name());
+ output.push_str("\" ");
+ }
+
+ output.push_str("fn(");
+ }
+
+ if !sig.inputs().is_empty() {
+ for &parameter_type in sig.inputs() {
+ push_debuginfo_type_name(tcx, parameter_type, true, output, visited);
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ pop_arg_separator(output);
+ }
+
+ if sig.c_variadic {
+ if !sig.inputs().is_empty() {
+ output.push_str(", ...");
+ } else {
+ output.push_str("...");
+ }
+ }
+
+ output.push(')');
+
+ if !cpp_like_debuginfo && !sig.output().is_unit() {
+ output.push_str(" -> ");
+ push_debuginfo_type_name(tcx, sig.output(), true, output, visited);
+ }
+
+ // We only keep the type in 'visited' for the duration of the body
+ // of this method. It's fine for a particular function type to show
+ // up multiple times in one overall type (e.g. `MyType<fn() -> u8, fn() -> u8>`).
+ //
+ // We only care about avoiding recursing directly back to the type
+ // we're currently processing.
+ visited.remove(&t);
+ }
+ ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ // Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
+ // "{async_fn_env#0}<T1, T2, ...>", etc.
+ // In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
+ // an artificial `enum$<>` type, as defined in msvc_enum_fallback().
+ if cpp_like_debuginfo && t.is_generator() {
+ let ty_and_layout = tcx.layout_of(ParamEnv::reveal_all().and(t)).unwrap();
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_closure_or_generator_name(tcx, def_id, substs, qualified, output, visited);
+ }
+ }
+ // Type parameters from polymorphized functions.
+ ty::Param(_) => {
+ write!(output, "{:?}", t).unwrap();
+ }
+ ty::Error(_)
+ | ty::Infer(_)
+ | ty::Placeholder(..)
+ | ty::Projection(..)
+ | ty::Bound(..)
+ | ty::Opaque(..)
+ | ty::GeneratorWitness(..) => {
+ bug!(
+ "debuginfo: Trying to create type name for \
+ unexpected type: {:?}",
+ t
+ );
+ }
+ }
+
+ // MSVC names enums differently than other platforms so that the debugging visualization
+ // format (natvis) is able to understand enums and render the active variant correctly in the
+ // debugger. For more information, look in `src/etc/natvis/intrinsic.natvis` and
+ // `EnumMemberDescriptionFactory::create_member_descriptions`.
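+ //
+ // The emitted name takes one of the following shapes (type and variant names illustrative):
+ //
+ // `enum$<my_crate::MyEnum>` - tag stored directly
+ // `enum$<my_crate::MyEnum, 0, 3, Dataful>` - niche encoding: min, max, dataful variant
+ // `enum$<my_crate::MyEnum, Variant>` - single-variant enum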
+ fn msvc_enum_fallback<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty_and_layout: TyAndLayout<'tcx>,
+ push_inner: &dyn Fn(/*output*/ &mut String, /*visited*/ &mut FxHashSet<Ty<'tcx>>),
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+ ) {
+ debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
+ let ty = ty_and_layout.ty;
+
+ output.push_str("enum$<");
+ push_inner(output, visited);
+
+ let variant_name = |variant_index| match ty.kind() {
+ ty::Adt(adt_def, _) => {
+ debug_assert!(adt_def.is_enum());
+ Cow::from(adt_def.variant(variant_index).name.as_str())
+ }
+ ty::Generator(..) => GeneratorSubsts::variant_name(variant_index),
+ _ => unreachable!(),
+ };
+
+ if let Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag,
+ variants,
+ ..
+ } = &ty_and_layout.variants
+ {
+ let dataful_variant_layout = &variants[*dataful_variant];
+
+ // Calculate the range of values for the dataful variant.
+ let dataful_discriminant_range =
+ dataful_variant_layout.largest_niche().unwrap().valid_range;
+
+ let min = dataful_discriminant_range.start;
+ let min = tag.size(&tcx).truncate(min);
+
+ let max = dataful_discriminant_range.end;
+ let max = tag.size(&tcx).truncate(max);
+
+ let dataful_variant_name = variant_name(*dataful_variant);
+ write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();
+ } else if let Variants::Single { index: variant_idx } = &ty_and_layout.variants {
+ // Uninhabited enums can't be constructed and should never need to be visualized so
+ // skip this step for them.
+ if !ty_and_layout.abi.is_uninhabited() {
+ write!(output, ", {}", variant_name(*variant_idx)).unwrap();
+ }
+ }
+ push_close_angle_bracket(true, output);
+ }
+
+ const NON_CPP_AUTO_TRAIT_SEPARATOR: &str = " + ";
+
+ fn push_auto_trait_separator(cpp_like_debuginfo: bool, output: &mut String) {
+ if cpp_like_debuginfo {
+ push_arg_separator(cpp_like_debuginfo, output);
+ } else {
+ output.push_str(NON_CPP_AUTO_TRAIT_SEPARATOR);
+ }
+ }
+
+ fn pop_auto_trait_separator(output: &mut String) {
+ if output.ends_with(NON_CPP_AUTO_TRAIT_SEPARATOR) {
+ output.truncate(output.len() - NON_CPP_AUTO_TRAIT_SEPARATOR.len());
+ } else {
+ pop_arg_separator(output);
+ }
+ }
+}
+
+pub enum VTableNameKind {
+ // Is the name for the const/static holding the vtable?
+ GlobalVariable,
+ // Is the name for the type of the vtable?
+ Type,
+}
+
+/// Computes a name for the global variable storing a vtable (or the type of that global variable).
+///
+/// The name is of the form:
+///
+/// `<path::to::SomeType as path::to::SomeTrait>::{vtable}`
+///
+/// or, when generating C++-like names:
+///
+/// `impl$<path::to::SomeType, path::to::SomeTrait>::vtable$`
+///
+/// If `kind` is `VTableNameKind::Type` then the last component is `{vtable_ty}` instead of just
+/// `{vtable}`, so that the type and the corresponding global variable get assigned different
+/// names.
+pub fn compute_debuginfo_vtable_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ kind: VTableNameKind,
+) -> String {
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ let mut vtable_name = String::with_capacity(64);
+
+ if cpp_like_debuginfo {
+ vtable_name.push_str("impl$<");
+ } else {
+ vtable_name.push('<');
+ }
+
+ let mut visited = FxHashSet::default();
+ push_debuginfo_type_name(tcx, t, true, &mut vtable_name, &mut visited);
+
+ if cpp_like_debuginfo {
+ vtable_name.push_str(", ");
+ } else {
+ vtable_name.push_str(" as ");
+ }
+
+ if let Some(trait_ref) = trait_ref {
+ let trait_ref =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
+ push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
+ visited.clear();
+ push_generic_params_internal(tcx, trait_ref.substs, &mut vtable_name, &mut visited);
+ } else {
+ vtable_name.push_str("_");
+ }
+
+ push_close_angle_bracket(cpp_like_debuginfo, &mut vtable_name);
+
+ let suffix = match (cpp_like_debuginfo, kind) {
+ (true, VTableNameKind::GlobalVariable) => "::vtable$",
+ (false, VTableNameKind::GlobalVariable) => "::{vtable}",
+ (true, VTableNameKind::Type) => "::vtable_type$",
+ (false, VTableNameKind::Type) => "::{vtable_type}",
+ };
+
+ vtable_name.reserve_exact(suffix.len());
+ vtable_name.push_str(suffix);
+
+ vtable_name
+}
+
+pub fn push_item_name(tcx: TyCtxt<'_>, def_id: DefId, qualified: bool, output: &mut String) {
+ let def_key = tcx.def_key(def_id);
+ if qualified {
+ if let Some(parent) = def_key.parent {
+ push_item_name(tcx, DefId { krate: def_id.krate, index: parent }, true, output);
+ output.push_str("::");
+ }
+ }
+
+ push_unqualified_item_name(tcx, def_id, def_key.disambiguated_data, output);
+}
+
+fn generator_kind_label(generator_kind: Option<GeneratorKind>) -> &'static str {
+ match generator_kind {
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Block)) => "async_block",
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Closure)) => "async_closure",
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Fn)) => "async_fn",
+ Some(GeneratorKind::Gen) => "generator",
+ None => "closure",
+ }
+}
+
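+/// Pushes a disambiguated name of the form `closure_env$0` (for cpp-like debuginfo) or
+/// `{closure_env#0}` (otherwise), where `closure_env` is `label` and `0` the disambiguator.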
+fn push_disambiguated_special_name(
+ label: &str,
+ disambiguator: u32,
+ cpp_like_debuginfo: bool,
+ output: &mut String,
+) {
+ if cpp_like_debuginfo {
+ write!(output, "{}${}", label, disambiguator).unwrap();
+ } else {
+ write!(output, "{{{}#{}}}", label, disambiguator).unwrap();
+ }
+}
+
+fn push_unqualified_item_name(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ disambiguated_data: DisambiguatedDefPathData,
+ output: &mut String,
+) {
+ match disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ output.push_str(tcx.crate_name(def_id.krate).as_str());
+ }
+ DefPathData::ClosureExpr => {
+ let label = generator_kind_label(tcx.generator_kind(def_id));
+
+ push_disambiguated_special_name(
+ label,
+ disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+ }
+ _ => match disambiguated_data.data.name() {
+ DefPathDataName::Named(name) => {
+ output.push_str(name.as_str());
+ }
+ DefPathDataName::Anon { namespace } => {
+ push_disambiguated_special_name(
+ namespace.as_str(),
+ disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+ }
+ },
+ };
+}
+
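+/// Pushes a `<...>` list of the non-erasable generic arguments in `substs` onto `output`.
+/// Returns `true` if anything was pushed, and `false` if there were no such arguments.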
+fn push_generic_params_internal<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) -> bool {
+ if substs.non_erasable_generics().next().is_none() {
+ return false;
+ }
+
+ debug_assert_eq!(substs, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs));
+
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ output.push('<');
+
+ for type_parameter in substs.non_erasable_generics() {
+ match type_parameter {
+ GenericArgKind::Type(type_parameter) => {
+ push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
+ }
+ GenericArgKind::Const(ct) => {
+ push_const_param(tcx, ct, output);
+ }
+ other => bug!("Unexpected non-erasable generic: {:?}", other),
+ }
+
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ pop_arg_separator(output);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+
+ true
+}
+
+fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut String) {
+ match ct.kind() {
+ ty::ConstKind::Param(param) => {
+ write!(output, "{}", param.name)
+ }
+ _ => match ct.ty().kind() {
+ ty::Int(ity) => {
+ let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
+ write!(output, "{}", val)
+ }
+ ty::Uint(_) => {
+ let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ write!(output, "{}", val)
+ }
+ ty::Bool => {
+ let val = ct.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap();
+ write!(output, "{}", val)
+ }
+ _ => {
+ // If we cannot evaluate the constant to a known type, we fall back
+ // to emitting a stable hash value of the constant. This isn't very pretty
+ // but we get a deterministic, virtually unique value for the constant.
+ //
+ // Let's only emit 64 bits of the hash value. That should be plenty for
+ // avoiding collisions and will make the emitted type names shorter.
+ let hash_short = tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ let ct = ct.eval(tcx, ty::ParamEnv::reveal_all());
+ hcx.while_hashing_spans(false, |hcx| {
+ ct.to_valtree().hash_stable(hcx, &mut hasher)
+ });
+ // Note: Don't use `StableHashResult` impl of `u64` here directly, since that
+ // would lead to endianness problems.
+ let hash: u128 = hasher.finish();
+ (hash.to_le() as u64).to_le()
+ });
+
+ if cpp_like_debuginfo(tcx) {
+ write!(output, "CONST${:x}", hash_short)
+ } else {
+ write!(output, "{{CONST#{:x}}}", hash_short)
+ }
+ }
+ },
+ }
+ .unwrap();
+}
+
+pub fn push_generic_params<'tcx>(tcx: TyCtxt<'tcx>, substs: SubstsRef<'tcx>, output: &mut String) {
+ let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
+ let mut visited = FxHashSet::default();
+ push_generic_params_internal(tcx, substs, output, &mut visited);
+}
+
+fn push_closure_or_generator_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ qualified: bool,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+ // Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
+ // "{async_fn_env#0}<T1, T2, ...>", etc.
+ let def_key = tcx.def_key(def_id);
+ let generator_kind = tcx.generator_kind(def_id);
+
+ if qualified {
+ let parent_def_id = DefId { index: def_key.parent.unwrap(), ..def_id };
+ push_item_name(tcx, parent_def_id, true, output);
+ output.push_str("::");
+ }
+
+ let mut label = String::with_capacity(20);
+ write!(&mut label, "{}_env", generator_kind_label(generator_kind)).unwrap();
+
+ push_disambiguated_special_name(
+ &label,
+ def_key.disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+
+ // We also need to add the generic arguments of the async fn/generator or
+ // the enclosing function (for closures or async blocks), so that we end
+ // up with a unique name for every instantiation.
+
+ // Find the generics of the enclosing function, as defined in the source code.
+ let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+ let generics = tcx.generics_of(enclosing_fn_def_id);
+
+ // Truncate the substs to the length of the above generics. This will cut off
+ // anything closure- or generator-specific.
+ let substs = substs.truncate_to(tcx, generics);
+ push_generic_params_internal(tcx, substs, output, visited);
+}
+
+fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
+ // MSVC debugger always treats `>>` as a shift, even when parsing templates,
+ // so add a space to avoid confusion.
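+ // (e.g. we emit `Foo<Bar<i32> >` instead of `Foo<Bar<i32>>`)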
+ if cpp_like_debuginfo && output.ends_with('>') {
+ output.push(' ')
+ };
+
+ output.push('>');
+}
+
+fn pop_close_angle_bracket(output: &mut String) {
+ assert!(output.ends_with('>'), "'output' does not end with '>': {}", output);
+ output.pop();
+ if output.ends_with(' ') {
+ output.pop();
+ }
+}
+
+fn push_arg_separator(cpp_like_debuginfo: bool, output: &mut String) {
+ // Natvis does not always like having spaces between parts of the type name
+ // and this causes issues when we need to write a typename in natvis, for example
+ // as part of a cast like the `HashMap` visualizer does.
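+ // (i.e. cpp-like output is `Foo<i32,u32>` rather than `Foo<i32, u32>`)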
+ if cpp_like_debuginfo {
+ output.push(',');
+ } else {
+ output.push_str(", ");
+ };
+}
+
+fn pop_arg_separator(output: &mut String) {
+ if output.ends_with(' ') {
+ output.pop();
+ }
+
+ assert!(output.ends_with(','));
+
+ output.pop();
+}
+
+/// Checks if we should generate C++-like names and debug information.
+pub fn cpp_like_debuginfo(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.target.is_like_msvc
+}
diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs
new file mode 100644
index 000000000..e6f402ef1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/glue.rs
@@ -0,0 +1,123 @@
+//! Code relating to drop glue.
+
+use crate::common::IntPredicate;
+use crate::meth;
+use crate::traits::*;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::WrappingRange;
+
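+/// Computes the runtime size and alignment of a value of type `t`. For unsized types,
+/// `info` carries the pointer metadata (the length of a slice/str or the vtable of a
+/// trait object) that the computation is based on.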
+pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ t: Ty<'tcx>,
+ info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ let layout = bx.layout_of(t);
+ debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout);
+ if !layout.is_unsized() {
+ let size = bx.const_usize(layout.size.bytes());
+ let align = bx.const_usize(layout.align.abi.bytes());
+ return (size, align);
+ }
+ match t.kind() {
+ ty::Dynamic(..) => {
+ // Load size/align from vtable.
+ let vtable = info.unwrap();
+ let size = meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_SIZE)
+ .get_usize(bx, vtable);
+ let align = meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_ALIGN)
+ .get_usize(bx, vtable);
+
+ // Alignment is always nonzero.
+ bx.range_metadata(align, WrappingRange { start: 1, end: !0 });
+
+ (size, align)
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(bx, 0);
+ // The info in this case is the length of the str, so the size is that
+ // times the unit size.
+ (
+ // All slice sizes must fit into `isize`, so this multiplication cannot (signed) wrap.
+ // NOTE: ideally, we want the effects of both `unchecked_smul` and `unchecked_umul`
+ // (resulting in `mul nsw nuw` in LLVM IR), since we know that the multiplication
+ // cannot signed wrap, and that both operands are non-negative. But at the time of writing,
+ // `BuilderMethods` can't do this, and it doesn't seem to enable any further optimizations.
+ bx.unchecked_smul(info.unwrap(), bx.const_usize(unit.size.bytes())),
+ bx.const_usize(unit.align.abi.bytes()),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!t.is_simd());
+ debug!("DST {} layout: {:?}", t, layout);
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align);
+ let sized_size = bx.const_usize(sized_size);
+ let sized_align = bx.const_usize(sized_align);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_ty = layout.field(bx, i).ty;
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = bx.add(sized_size, unsized_size);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = t.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let align = match (
+ bx.const_to_opt_u128(sized_align, false),
+ bx.const_to_opt_u128(unsized_align, false),
+ ) {
+ (Some(sized_align), Some(unsized_align)) => {
+ // If both alignments are constant, (the sized_align should always be), then
+ // pick the correct alignment statically.
+ bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+ }
+ _ => {
+ let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
+ bx.select(cmp, sized_align, unsized_align)
+ }
+ };
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align - (size & (align-1)) : 0)`
+ //
+ // i.e. `size` rounded up to the nearest multiple of `align`,
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
+ let one = bx.const_usize(1);
+ let addend = bx.sub(align, one);
+ let add = bx.add(size, addend);
+ let neg = bx.neg(align);
+ let size = bx.and(add, neg);
+
+ (size, align)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
new file mode 100644
index 000000000..1802eedf1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -0,0 +1,244 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(try_blocks)]
+#![feature(let_else)]
+#![feature(once_cell)]
+#![feature(associated_type_bounds)]
+#![feature(strict_provenance)]
+#![feature(int_roundings)]
+#![feature(if_let_guard)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+//! This crate contains codegen code that is used by all codegen backends (LLVM and others).
+//! The backend-agnostic functions of this crate use functions defined in various traits that
+//! have to be implemented by each backend.
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::LangItem;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::dependency_format::Dependencies;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_serialize::opaque::{MemDecoder, MemEncoder};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_session::config::{CrateType, OutputFilenames, OutputType, RUST_CGU_EXT};
+use rustc_session::cstore::{self, CrateSource};
+use rustc_session::utils::NativeLibKind;
+use rustc_span::symbol::Symbol;
+use rustc_span::DebuggerVisualizerFile;
+use std::collections::BTreeSet;
+use std::path::{Path, PathBuf};
+
+pub mod back;
+pub mod base;
+pub mod common;
+pub mod coverageinfo;
+pub mod debuginfo;
+pub mod glue;
+pub mod meth;
+pub mod mir;
+pub mod mono_item;
+pub mod target_features;
+pub mod traits;
+
+pub struct ModuleCodegen<M> {
+ /// The name of the module. When the crate may be saved between
+ /// compilations, incremental compilation requires that name be
+ /// unique amongst **all** crates. Therefore, it should contain
+ /// something unique to this crate (e.g., a module path) as well
+ /// as the crate name and disambiguator.
+ /// We currently generate these names via CodegenUnit::build_cgu_name().
+ pub name: String,
+ pub module_llvm: M,
+ pub kind: ModuleKind,
+}
+
+impl<M> ModuleCodegen<M> {
+ pub fn into_compiled_module(
+ self,
+ emit_obj: bool,
+ emit_dwarf_obj: bool,
+ emit_bc: bool,
+ outputs: &OutputFilenames,
+ ) -> CompiledModule {
+ let object = emit_obj.then(|| outputs.temp_path(OutputType::Object, Some(&self.name)));
+ let dwarf_object = emit_dwarf_obj.then(|| outputs.temp_path_dwo(Some(&self.name)));
+ let bytecode = emit_bc.then(|| outputs.temp_path(OutputType::Bitcode, Some(&self.name)));
+
+ CompiledModule { name: self.name.clone(), kind: self.kind, object, dwarf_object, bytecode }
+ }
+}
+
+#[derive(Debug, Encodable, Decodable)]
+pub struct CompiledModule {
+ pub name: String,
+ pub kind: ModuleKind,
+ pub object: Option<PathBuf>,
+ pub dwarf_object: Option<PathBuf>,
+ pub bytecode: Option<PathBuf>,
+}
+
+pub struct CachedModuleCodegen {
+ pub name: String,
+ pub source: WorkProduct,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Encodable, Decodable)]
+pub enum ModuleKind {
+ Regular,
+ Metadata,
+ Allocator,
+}
+
+bitflags::bitflags! {
+ pub struct MemFlags: u8 {
+ const VOLATILE = 1 << 0;
+ const NONTEMPORAL = 1 << 1;
+ const UNALIGNED = 1 << 2;
+ }
+}
+
+#[derive(Clone, Debug, Encodable, Decodable, HashStable)]
+pub struct NativeLib {
+ pub kind: NativeLibKind,
+ pub name: Option<Symbol>,
+ pub cfg: Option<ast::MetaItem>,
+ pub verbatim: Option<bool>,
+ pub dll_imports: Vec<cstore::DllImport>,
+}
+
+impl From<&cstore::NativeLib> for NativeLib {
+ fn from(lib: &cstore::NativeLib) -> Self {
+ NativeLib {
+ kind: lib.kind,
+ name: lib.name,
+ cfg: lib.cfg.clone(),
+ verbatim: lib.verbatim,
+ dll_imports: lib.dll_imports.clone(),
+ }
+ }
+}
+
+/// Misc info we load from metadata to persist beyond the tcx.
+///
+/// Note: though `CrateNum` is only meaningful within the same tcx, information within `CrateInfo`
+/// is self-contained. `CrateNum` can be viewed as a unique identifier within a `CrateInfo`, where
+/// `used_crate_source` contains all `CrateSource` of the dependents, and maintains a mapping from
+/// identifiers (`CrateNum`) to `CrateSource`. The other fields map `CrateNum` to the crate's own
+/// additional properties, so that effectively we can retrieve each dependent crate's `CrateSource`
+/// and the corresponding properties without referencing information outside of a `CrateInfo`.
+#[derive(Debug, Encodable, Decodable)]
+pub struct CrateInfo {
+ pub target_cpu: String,
+ pub exported_symbols: FxHashMap<CrateType, Vec<String>>,
+ pub linked_symbols: FxHashMap<CrateType, Vec<(String, SymbolExportKind)>>,
+ pub local_crate_name: Symbol,
+ pub compiler_builtins: Option<CrateNum>,
+ pub profiler_runtime: Option<CrateNum>,
+ pub is_no_builtins: FxHashSet<CrateNum>,
+ pub native_libraries: FxHashMap<CrateNum, Vec<NativeLib>>,
+ pub crate_name: FxHashMap<CrateNum, Symbol>,
+ pub used_libraries: Vec<NativeLib>,
+ pub used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
+ pub used_crates: Vec<CrateNum>,
+ pub lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
+ pub missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
+ pub dependency_formats: Lrc<Dependencies>,
+ pub windows_subsystem: Option<String>,
+ pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>,
+}
+
+#[derive(Encodable, Decodable)]
+pub struct CodegenResults {
+ pub modules: Vec<CompiledModule>,
+ pub allocator_module: Option<CompiledModule>,
+ pub metadata_module: Option<CompiledModule>,
+ pub metadata: rustc_metadata::EncodedMetadata,
+ pub crate_info: CrateInfo,
+}
+
+pub fn provide(providers: &mut Providers) {
+ crate::back::symbol_export::provide(providers);
+ crate::base::provide(providers);
+ crate::target_features::provide(providers);
+}
+
+pub fn provide_extern(providers: &mut ExternProviders) {
+ crate::back::symbol_export::provide_extern(providers);
+}
+
+/// Checks if the given filename ends with the `.rcgu.o` extension that `rustc`
+/// uses for the object files it generates.
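+/// For example, a name like `foo.abc123-cgu.0.rcgu.o` matches, while plain `foo.o` does not.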
+pub fn looks_like_rust_object_file(filename: &str) -> bool {
+ let path = Path::new(filename);
+ let ext = path.extension().and_then(|s| s.to_str());
+ if ext != Some(OutputType::Object.extension()) {
+ // The file name does not end with ".o", so it can't be an object file.
+ return false;
+ }
+
+ // Strip the ".o" at the end
+ let ext2 = path.file_stem().and_then(|s| Path::new(s).extension()).and_then(|s| s.to_str());
+
+ // Check if the "inner" extension matches the Rust CGU extension.
+ ext2 == Some(RUST_CGU_EXT)
+}
+
+const RLINK_VERSION: u32 = 1;
+const RLINK_MAGIC: &[u8] = b"rustlink";
+
+const RUSTC_VERSION: Option<&str> = option_env!("CFG_VERSION");
+
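+// On-disk layout of a `.rlink` file, as written by `serialize_rlink`:
+//
+// b"rustlink" | RLINK_VERSION as a big-endian u32 | rustc version string | encoded CodegenResults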
+impl CodegenResults {
+ pub fn serialize_rlink(codegen_results: &CodegenResults) -> Vec<u8> {
+ let mut encoder = MemEncoder::new();
+ encoder.emit_raw_bytes(RLINK_MAGIC);
+ // `emit_raw_bytes` is used to make sure that the version representation does not depend on
+ // Encoder's inner representation of `u32`.
+ encoder.emit_raw_bytes(&RLINK_VERSION.to_be_bytes());
+ encoder.emit_str(RUSTC_VERSION.unwrap());
+ Encodable::encode(codegen_results, &mut encoder);
+ encoder.finish()
+ }
+
+ pub fn deserialize_rlink(data: Vec<u8>) -> Result<Self, String> {
+ // The Decodable machinery is not used here because it panics if the input data is invalid
+ // and because its internal representation may change.
+ if !data.starts_with(RLINK_MAGIC) {
+ return Err("The input does not look like a .rlink file".to_string());
+ }
+ let data = &data[RLINK_MAGIC.len()..];
+ if data.len() < 4 {
+ return Err("The input does not contain version number".to_string());
+ }
+
+ let mut version_array: [u8; 4] = Default::default();
+ version_array.copy_from_slice(&data[..4]);
+ let version = u32::from_be_bytes(version_array);
+ if version != RLINK_VERSION {
+ return Err(format!(
+ ".rlink file was produced with encoding version {version}, but the current version is {RLINK_VERSION}"
+ ));
+ }
+
+ let mut decoder = MemDecoder::new(&data[4..], 0);
+ let rustc_version = decoder.read_str();
+ let current_version = RUSTC_VERSION.unwrap();
+ if rustc_version != current_version {
+ return Err(format!(
+ ".rlink file was produced by rustc version {rustc_version}, but the current version is {current_version}."
+ ));
+ }
+
+ let codegen_results = CodegenResults::decode(&mut decoder);
+ Ok(codegen_results)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
new file mode 100644
index 000000000..27d791d90
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -0,0 +1,116 @@
+use crate::traits::*;
+
+use rustc_middle::ty::{self, subst::GenericArgKind, ExistentialPredicate, Ty, TyCtxt};
+use rustc_session::config::Lto;
+use rustc_symbol_mangling::typeid_for_trait_ref;
+use rustc_target::abi::call::FnAbi;
+
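+/// The index of an entry in a vtable, counted in pointer-sized slots.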
+#[derive(Copy, Clone, Debug)]
+pub struct VirtualIndex(u64);
+
+impl<'a, 'tcx> VirtualIndex {
+ pub fn from_index(index: usize) -> Self {
+ VirtualIndex(index as u64)
+ }
+
+ pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
+ self,
+ bx: &mut Bx,
+ llvtable: Bx::Value,
+ ty: Ty<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ ) -> Bx::Value {
+ // Load the function pointer from the vtable.
+ debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
+ let llty = bx.fn_ptr_backend_type(fn_abi);
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
+
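+ // With `-Zvirtual-function-elimination` under fat LTO, go through LLVM's
+ // `llvm.type.checked.load` intrinsic so that LLVM can see which vtable slots
+ // are actually used and discard unreferenced virtual functions.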
+ if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
+ && bx.cx().sess().lto() == Lto::Fat
+ {
+ let typeid =
+ bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), get_trait_ref(bx.tcx(), ty)));
+ let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
+ let type_checked_load = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
+ let func = bx.extract_value(type_checked_load, 0);
+ bx.pointercast(func, llty)
+ } else {
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
+ let ptr = bx.load(llty, gep, ptr_align);
+ bx.nonnull_metadata(ptr);
+ // VTable loads are invariant.
+ bx.set_invariant_load(ptr);
+ ptr
+ }
+ }
+
+ pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
+ self,
+ bx: &mut Bx,
+ llvtable: Bx::Value,
+ ) -> Bx::Value {
+ // Load the usize-sized entry from the vtable.
+ debug!("get_usize({:?}, {:?})", llvtable, self);
+
+ let llty = bx.type_isize();
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
+ let usize_align = bx.tcx().data_layout.pointer_align.abi;
+ let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
+ let ptr = bx.load(llty, gep, usize_align);
+ // VTable loads are invariant.
+ bx.set_invariant_load(ptr);
+ ptr
+ }
+}
+
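+/// Walks `ty` (peeling references) to find the `dyn Trait` within it and returns the
+/// principal trait reference of that trait object.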
+fn get_trait_ref<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::PolyExistentialTraitRef<'tcx> {
+ for arg in ty.peel_refs().walk() {
+ if let GenericArgKind::Type(ty) = arg.unpack() {
+ if let ty::Dynamic(trait_refs, _) = ty.kind() {
+ return trait_refs[0].map_bound(|trait_ref| match trait_ref {
+ ExistentialPredicate::Trait(tr) => tr,
+ ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
+ ExistentialPredicate::AutoTrait(_) => {
+ bug!("auto traits don't have functions")
+ }
+ });
+ }
+ }
+ }
+
+ bug!("expected a `dyn Trait` ty, found {ty:?}")
+}
+
+/// Creates a dynamic vtable for the given type and vtable origin.
+/// This is used only for objects.
+///
+/// The vtables are cached instead of created on every call.
+///
+/// The `trait_ref` encodes the erased self type. Hence if we are
+/// making an object `Foo<dyn Trait>` from a value of type `Foo<T>`, then
+/// `trait_ref` would correspond to `T: Trait`.
+pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
+ cx: &Cx,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Cx::Value {
+ let tcx = cx.tcx();
+
+ debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
+
+ // Check the cache.
+ if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) {
+ return val;
+ }
+
+ let vtable_alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ let vtable_allocation = tcx.global_alloc(vtable_alloc_id).unwrap_memory();
+ let vtable_const = cx.const_data_from_alloc(vtable_allocation);
+ let align = cx.data_layout().pointer_align.abi;
+ let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
+
+ cx.create_vtable_debuginfo(ty, trait_ref, vtable);
+ cx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
+ vtable
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
new file mode 100644
index 000000000..24da48ead
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -0,0 +1,368 @@
+//! An analysis to determine which locals require allocas and
+//! which do not.
+
+use super::FunctionCx;
+use crate::traits::*;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Location, TerminatorKind};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+
+pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ fx: &FunctionCx<'a, 'tcx, Bx>,
+) -> BitSet<mir::Local> {
+ let mir = fx.mir;
+ let dominators = mir.basic_blocks.dominators();
+ let locals = mir
+ .local_decls
+ .iter()
+ .map(|decl| {
+ let ty = fx.monomorphize(decl.ty);
+ let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span);
+ if layout.is_zst() {
+ LocalKind::ZST
+ } else if fx.cx.is_backend_immediate(layout) || fx.cx.is_backend_scalar_pair(layout) {
+ LocalKind::Unused
+ } else {
+ LocalKind::Memory
+ }
+ })
+ .collect();
+
+ let mut analyzer = LocalAnalyzer { fx, dominators, locals };
+
+ // Arguments get assigned to by means of the function being called
+ for arg in mir.args_iter() {
+ analyzer.assign(arg, mir::START_BLOCK.start_location());
+ }
+
+ // If there exists a local definition that dominates all uses of that local,
+ // the definition should be visited first. Traverse blocks in an order that
+ // is a topological sort of dominance partial order.
+ for (bb, data) in traversal::reverse_postorder(&mir) {
+ analyzer.visit_basic_block_data(bb, data);
+ }
+
+ let mut non_ssa_locals = BitSet::new_empty(analyzer.locals.len());
+ for (local, kind) in analyzer.locals.iter_enumerated() {
+ if matches!(kind, LocalKind::Memory) {
+ non_ssa_locals.insert(local);
+ }
+ }
+
+ non_ssa_locals
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum LocalKind {
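+ /// A local of zero-sized type, which needs no storage at all.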
+ ZST,
+ /// A local that requires an alloca.
+ Memory,
+ /// A scalar or a scalar pair local that is neither defined nor used.
+ Unused,
+ /// A scalar or a scalar pair local with a single definition that dominates all uses.
+ SSA(mir::Location),
+}
+
+struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ fx: &'mir FunctionCx<'a, 'tcx, Bx>,
+ dominators: Dominators<mir::BasicBlock>,
+ locals: IndexVec<mir::Local, LocalKind>,
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
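+ /// Records a definition of `local`. The first assignment to an otherwise unused
+ /// scalar local makes it `SSA`; any further assignment demotes it to `Memory`.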
+ fn assign(&mut self, local: mir::Local, location: Location) {
+ let kind = &mut self.locals[local];
+ match *kind {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::Unused => {
+ *kind = LocalKind::SSA(location);
+ }
+ LocalKind::SSA(_) => {
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+
+ fn process_place(
+ &mut self,
+ place_ref: &mir::PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ let cx = self.fx.cx;
+
+ if let Some((place_base, elem)) = place_ref.last_projection() {
+ let mut base_context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+
+ // Allow uses of projections that are ZSTs or from scalar fields.
+ let is_consume = matches!(
+ context,
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ )
+ );
+ if is_consume {
+ let base_ty = place_base.ty(self.fx.mir, cx.tcx());
+ let base_ty = self.fx.monomorphize(base_ty);
+
+ // ZSTs don't require any actual memory access.
+ let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
+ let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
+ if cx.spanned_layout_of(elem_ty, span).is_zst() {
+ return;
+ }
+
+ if let mir::ProjectionElem::Field(..) = elem {
+ let layout = cx.spanned_layout_of(base_ty.ty, span);
+ if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
+ // Recurse with the same context, instead of `Projection`,
+ // potentially stopping at non-operand projections,
+ // which would trigger `not_ssa` on locals.
+ base_context = context;
+ }
+ }
+ }
+
+ if let mir::ProjectionElem::Deref = elem {
+ // Deref projections typically only read the pointer.
+ base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+ }
+
+ self.process_place(&place_base, base_context, location);
+ // HACK(eddyb) this emulates the old `visit_projection_elem`, this
+ // entire `visit_place`-like `process_place` method should be rewritten,
+ // now that we have moved to the "slice of projections" representation.
+ if let mir::ProjectionElem::Index(local) = elem {
+ self.visit_local(
+ local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+ }
+ } else {
+ self.visit_local(place_ref.local, context, location);
+ }
+ }
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+ for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+{
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
+
+ if let Some(local) = place.as_local() {
+ self.assign(local, location);
+ if self.locals[local] != LocalKind::Memory {
+ let decl_span = self.fx.mir.local_decls[local].source_info.span;
+ if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+ self.locals[local] = LocalKind::Memory;
+ }
+ }
+ } else {
+ self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+ }
+
+ self.visit_rvalue(rvalue, location);
+ }
+
+ fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+ debug!("visit_place(place={:?}, context={:?})", place, context);
+ self.process_place(&place.as_ref(), context, location);
+ }
+
+ fn visit_local(&mut self, local: mir::Local, context: PlaceContext, location: Location) {
+ match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Call)
+ | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
+ self.assign(local, location);
+ }
+
+ PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ ) => match &mut self.locals[local] {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::SSA(def) if def.dominates(location, &self.dominators) => {}
+ // Reads from uninitialized variables (e.g., in dead code, after
+ // optimizations) require locals to be in (uninitialized) memory.
+ // N.B., there can be uninitialized reads of a local visited after
+ // an assignment to that local, if they happen on disjoint paths.
+ kind @ (LocalKind::Unused | LocalKind::SSA(_)) => {
+ *kind = LocalKind::Memory;
+ }
+ },
+
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Deinit
+ | MutatingUseContext::SetDiscriminant
+ | MutatingUseContext::AsmOutput
+ | MutatingUseContext::Borrow
+ | MutatingUseContext::AddressOf
+ | MutatingUseContext::Projection,
+ )
+ | PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Inspect
+ | NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::AddressOf
+ | NonMutatingUseContext::Projection,
+ ) => {
+ self.locals[local] = LocalKind::Memory;
+ }
+
+ PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
+ let kind = &mut self.locals[local];
+ if *kind != LocalKind::Memory {
+ let ty = self.fx.mir.local_decls[local].ty;
+ let ty = self.fx.monomorphize(ty);
+ if self.fx.cx.type_needs_drop(ty) {
+ // Only need the place if we're actually dropping it.
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+ NotCleanup,
+ Funclet,
+ Internal { funclet: mir::BasicBlock },
+}
+
+impl CleanupKind {
+ pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
+ match self {
+ CleanupKind::NotCleanup => None,
+ CleanupKind::Funclet => Some(for_bb),
+ CleanupKind::Internal { funclet } => Some(funclet),
+ }
+ }
+}
+
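+/// Computes the `CleanupKind` of every basic block in two passes: `discover_masters`
+/// marks each unwind target of a terminator as a `Funclet`, and `propagate` then flows
+/// funclet membership along successor edges, promoting any block reachable from two
+/// different funclets into a funclet of its own.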
+pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+ fn discover_masters<'tcx>(
+ result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+ mir: &mir::Body<'tcx>,
+ ) {
+ for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ match data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Unreachable
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => { /* nothing to do */ }
+ TerminatorKind::Call { cleanup: unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: unwind, .. }
+ | TerminatorKind::Assert { cleanup: unwind, .. }
+ | TerminatorKind::DropAndReplace { unwind, .. }
+ | TerminatorKind::Drop { unwind, .. } => {
+ if let Some(unwind) = unwind {
+ debug!(
+ "cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+ bb, data, unwind
+ );
+ result[unwind] = CleanupKind::Funclet;
+ }
+ }
+ }
+ }
+ }
+
+ fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+ let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+
+ let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
+ ref mut s @ None => {
+ debug!("set_successor: updating successor of {:?} to {:?}", funclet, succ);
+ *s = Some(succ);
+ }
+ Some(s) => {
+ if s != succ {
+ span_bug!(
+ mir.span,
+ "funclet {:?} has 2 parents - {:?} and {:?}",
+ funclet,
+ s,
+ succ
+ );
+ }
+ }
+ };
+
+ for (bb, data) in traversal::reverse_postorder(mir) {
+ let funclet = match result[bb] {
+ CleanupKind::NotCleanup => continue,
+ CleanupKind::Funclet => bb,
+ CleanupKind::Internal { funclet } => funclet,
+ };
+
+ debug!(
+ "cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+ bb, data, result[bb], funclet
+ );
+
+ for succ in data.terminator().successors() {
+ let kind = result[succ];
+ debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", funclet, succ, kind);
+ match kind {
+ CleanupKind::NotCleanup => {
+ result[succ] = CleanupKind::Internal { funclet };
+ }
+ CleanupKind::Funclet => {
+ if funclet != succ {
+ set_successor(funclet, succ);
+ }
+ }
+ CleanupKind::Internal { funclet: succ_funclet } => {
+ if funclet != succ_funclet {
+ // `succ` has 2 different funclets going into it, so it must
+ // be a funclet by itself.
+
+ debug!(
+ "promoting {:?} to a funclet and updating {:?}",
+ succ, succ_funclet
+ );
+ result[succ] = CleanupKind::Funclet;
+ set_successor(succ_funclet, succ);
+ set_successor(funclet, succ);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+
+ discover_masters(&mut result, mir);
+ propagate(&mut result, mir);
+ debug!("cleanup_kinds: result={:?}", result);
+ result
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
new file mode 100644
index 000000000..3eee58d9d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -0,0 +1,1654 @@
+use super::operand::OperandRef;
+use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_ast as ast;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
+use rustc_span::source_map::Span;
+use rustc_span::{sym, Symbol};
+use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, HasDataLayout, WrappingRange};
+use rustc_target::spec::abi::Abi;
+
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns,
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'tcx> {
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ funclet_bb: Option<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
+ /// Returns the appropriate `Funclet` for the current funclet, if on MSVC,
+ /// either already previously cached, or newly created, by `landing_pad_for`.
+ fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
+ ) -> Option<&'b Bx::Funclet> {
+ let funclet_bb = self.funclet_bb?;
+ if base::wants_msvc_seh(fx.cx.tcx().sess) {
+ // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
+ // it has to be now. This may not seem necessary, as RPO should lead
+ // to all the unwind edges being visited (and so to `landing_pad_for`
+ // getting called for them), before building any of the blocks inside
+ // the funclet itself - however, if MIR contains edges that end up not
+ // being needed in the LLVM IR after monomorphization, the funclet may
+ // be unreachable, and we don't have yet a way to skip building it in
+ // such an eventuality (which may be a better solution than this).
+ if fx.funclets[funclet_bb].is_none() {
+ fx.landing_pad_for(funclet_bb);
+ }
+
+ Some(
+ fx.funclets[funclet_bb]
+ .as_ref()
+ .expect("landing_pad_for didn't also create funclets entry"),
+ )
+ } else {
+ None
+ }
+ }
+
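+ /// Returns the backend basic block to jump to for `target`, plus a flag that is
+ /// `true` when the jump crosses from one funclet into another and must therefore
+ /// go through a `cleanupret` (see `llblock` and `funclet_br`).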
+ fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> (Bx::BasicBlock, bool) {
+ let span = self.terminator.source_info.span;
+ let lltarget = fx.llbb(target);
+ let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+ match (self.funclet_bb, target_funclet) {
+ (None, None) => (lltarget, false),
+ (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
+ (lltarget, false)
+ }
+ // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
+ (None, Some(_)) => (fx.landing_pad_for(target), false),
+ (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+ (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ }
+ }
+
+ /// Create a basic block.
+ fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> Bx::BasicBlock {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // MSVC cross-funclet jump - need a trampoline
+
+ debug!("llblock: creating cleanup trampoline for {:?}", target);
+ let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+ let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ trampoline
+ } else {
+ lltarget
+ }
+ }
+
+ fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ target: mir::BasicBlock,
+ ) {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // micro-optimization: generate a `ret` rather than a jump
+ // to a trampoline.
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ } else {
+ bx.br(lltarget);
+ }
+ }
+
+ /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
+ /// return destination `destination` and the cleanup block `cleanup`.
+ fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+ fn_ptr: Bx::Value,
+ llargs: &[Bx::Value],
+ destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+ cleanup: Option<mir::BasicBlock>,
+ copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
+ ) {
+ // If there is a cleanup block and the function we're calling can unwind, then
+ // do an invoke, otherwise do a call.
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
+ Some(self.llblock(fx, cleanup))
+ } else if fx.mir[self.bb].is_cleanup
+ && fn_abi.can_unwind
+ && !base::wants_msvc_seh(fx.cx.tcx().sess)
+ {
+ // Exceptions must not propagate out of the execution of a cleanup (doing so
+ // can cause undefined behaviour). We insert a double unwind guard for
+ // functions that can potentially unwind to protect against this.
+ //
+ // This is not necessary for SEH which does not use successive unwinding
+ // like Itanium EH. EH frames in SEH are different from normal function
+ // frames and SEH will abort automatically if an exception tries to
+ // propagate out from cleanup.
+ Some(fx.double_unwind_guard())
+ } else {
+ None
+ };
+
+ if let Some(unwind_block) = unwind_block {
+ let ret_llbb = if let Some((_, target)) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+ let invokeret =
+ bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, invokeret);
+ if fx.mir[self.bb].is_cleanup {
+ bx.do_not_inline(invokeret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ bx.switch_to_block(fx.llbb(target));
+ fx.set_debug_loc(bx, self.terminator.source_info);
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
+ }
+ } else {
+ let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ if fx.mir[self.bb].is_cleanup {
+ // Cleanup is always the cold path. Don't inline
+ // drop glue. Also, when there is a deeply-nested
+ // struct, there are "symmetry" issues that cause
+ // exponential inlining - see issue #41696.
+ bx.do_not_inline(llret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+
+ /// Generates inline assembly with optional `destination` and `cleanup`.
+ fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Bx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ if let Some(cleanup) = cleanup {
+ let ret_llbb = if let Some(target) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+
+ bx.codegen_inline_asm(
+ template,
+ &operands,
+ options,
+ line_spans,
+ instance,
+ Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ );
+ } else {
+ bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
+
+ if let Some(target) = destination {
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+}
+
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ /// Generates code for a `Resume` terminator.
+ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+ if let Some(funclet) = helper.funclet(self) {
+ bx.cleanup_ret(funclet, None);
+ } else {
+ let slot = self.get_personality_slot(&mut bx);
+ let lp0 = slot.project_field(&mut bx, 0);
+ let lp0 = bx.load_operand(lp0).immediate();
+ let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = bx.load_operand(lp1).immediate();
+ slot.storage_dead(&mut bx);
+
+ let mut lp = bx.const_undef(self.landing_pad_type());
+ lp = bx.insert_value(lp, lp0, 0);
+ lp = bx.insert_value(lp, lp1, 1);
+ bx.resume(lp);
+ }
+ }
+
+ fn codegen_switchint_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ discr: &mir::Operand<'tcx>,
+ switch_ty: Ty<'tcx>,
+ targets: &SwitchTargets,
+ ) {
+ let discr = self.codegen_operand(&mut bx, &discr);
+ // `switch_ty` is redundant, sanity-check that.
+ assert_eq!(discr.layout.ty, switch_ty);
+ let mut target_iter = targets.iter();
+ if target_iter.len() == 1 {
+ // If there are two targets (one conditional, one fallback), emit br instead of switch
+ let (test_value, target) = target_iter.next().unwrap();
+ let lltrue = helper.llblock(self, target);
+ let llfalse = helper.llblock(self, targets.otherwise());
+ if switch_ty == bx.tcx().types.bool {
+ // Don't generate trivial icmps when switching on bool
+ match test_value {
+ 0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
+ 1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
+ _ => bug!(),
+ }
+ } else {
+ let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+ let llval = bx.const_uint_big(switch_llty, test_value);
+ let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+ bx.cond_br(cmp, lltrue, llfalse);
+ }
+ } else {
+ bx.switch(
+ discr.immediate(),
+ helper.llblock(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ );
+ }
+ }
+
+ fn codegen_return_terminator(&mut self, mut bx: Bx) {
+ // Call `va_end` if this is the definition of a C-variadic function.
+ if self.fn_abi.c_variadic {
+ // The `VaList` "spoofed" argument is just after all the real arguments.
+ let va_list_arg_idx = self.fn_abi.args.len();
+ match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+ LocalRef::Place(va_list) => {
+ bx.va_end(va_list.llval);
+ }
+ _ => bug!("C-variadic function must have a `VaList` place"),
+ }
+ }
+ if self.fn_abi.ret.layout.abi.is_uninhabited() {
+ // Functions with uninhabited return values are marked `noreturn`,
+ // so we should make sure that we never actually return.
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ // `abort` does not terminate the block, so we still need to generate
+ // an `unreachable` terminator after it.
+ bx.unreachable();
+ return;
+ }
+ let llval = match self.fn_abi.ret.mode {
+ PassMode::Ignore | PassMode::Indirect { .. } => {
+ bx.ret_void();
+ return;
+ }
+
+ PassMode::Direct(_) | PassMode::Pair(..) => {
+ let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+ if let Ref(llval, _, align) = op.val {
+ bx.load(bx.backend_type(op.layout), llval, align)
+ } else {
+ op.immediate_or_packed_pair(&mut bx)
+ }
+ }
+
+ PassMode::Cast(cast_ty) => {
+ let op = match self.locals[mir::RETURN_PLACE] {
+ LocalRef::Operand(Some(op)) => op,
+ LocalRef::Operand(None) => bug!("use of return before def"),
+ LocalRef::Place(cg_place) => OperandRef {
+ val: Ref(cg_place.llval, None, cg_place.align),
+ layout: cg_place.layout,
+ },
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ };
+ let llslot = match op.val {
+ Immediate(_) | Pair(..) => {
+ let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
+ op.val.store(&mut bx, scratch);
+ scratch.llval
+ }
+ Ref(llval, _, align) => {
+ assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
+ llval
+ }
+ };
+ let ty = bx.cast_backend_type(&cast_ty);
+ let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+ bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ }
+ };
+ bx.ret(llval);
+ }
+
+ fn codegen_drop_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ location: mir::Place<'tcx>,
+ target: mir::BasicBlock,
+ unwind: Option<mir::BasicBlock>,
+ ) {
+ let ty = location.ty(self.mir, bx.tcx()).ty;
+ let ty = self.monomorphize(ty);
+ let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+ // we don't actually need to drop anything.
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ let place = self.codegen_place(&mut bx, location.as_ref());
+ let (args1, args2);
+ let mut args = if let Some(llextra) = place.llextra {
+ args2 = [place.llval, llextra];
+ &args2[..]
+ } else {
+ args1 = [place.llval];
+ &args1[..]
+ };
+ let (drop_fn, fn_abi) = match ty.kind() {
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ ty::Dynamic(..) => {
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let vtable = args[1];
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
+ };
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ drop_fn,
+ args,
+ Some((ReturnDest::Nothing, target)),
+ unwind,
+ &[],
+ );
+ }
+
+ fn codegen_assert_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ cond: &mir::Operand<'tcx>,
+ expected: bool,
+ msg: &mir::AssertMessage<'tcx>,
+ target: mir::BasicBlock,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ let span = terminator.source_info.span;
+ let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ // NOTE: Unlike binops, negation doesn't have its own
+ // checked operation, just a comparison with the minimum
+ // value, so we have to check for the assert message.
+ if !bx.check_overflow() {
+ if let AssertKind::OverflowNeg(_) = *msg {
+ const_cond = Some(expected);
+ }
+ }
+
+ // Don't codegen the panic block if success is known.
+ if const_cond == Some(expected) {
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // Pass the condition through llvm.expect for branch hinting.
+ let cond = bx.expect(cond, expected);
+
+ // Create the failure block and the conditional branch to it.
+ let lltarget = helper.llblock(self, target);
+ let panic_block = bx.append_sibling_block("panic");
+ if expected {
+ bx.cond_br(cond, lltarget, panic_block);
+ } else {
+ bx.cond_br(cond, panic_block, lltarget);
+ }
+
+ // After this point, bx is the block for the call to panic.
+ bx.switch_to_block(panic_block);
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Get the location information.
+ let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+
+ // Put together the arguments to the panic entry point.
+ let (lang_item, args) = match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = self.codegen_operand(&mut bx, len).immediate();
+ let index = self.codegen_operand(&mut bx, index).immediate();
+ // It's `fn panic_bounds_check(index: usize, len: usize)`,
+ // and `#[track_caller]` adds an implicit third argument.
+ (LangItem::PanicBoundsCheck, vec![index, len, location])
+ }
+ _ => {
+ let msg = bx.const_str(msg.description());
+ // It's `pub fn panic(expr: &str)`, with the wide reference being passed
+ // as two arguments, and `#[track_caller]` adds an implicit third argument.
+ (LangItem::Panic, vec![msg.0, msg.1, location])
+ }
+ };
+
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+ }
+
+ fn codegen_abort_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ ) {
+ let span = terminator.source_info.span;
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+ }
+
+ /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+ fn codegen_panic_intrinsic(
+ &mut self,
+ helper: &TerminatorCodegenHelper<'tcx>,
+ bx: &mut Bx,
+ intrinsic: Option<Symbol>,
+ instance: Option<Instance<'tcx>>,
+ source_info: mir::SourceInfo,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ ) -> bool {
+ // Emit a panic or a no-op for `assert_*` intrinsics.
+ // These are intrinsics that compile to panics so that we can get a message
+ // which mentions the offending type, even from a const context.
+ #[derive(Debug, PartialEq)]
+ enum AssertIntrinsic {
+ Inhabited,
+ ZeroValid,
+ UninitValid,
+ }
+ let panic_intrinsic = intrinsic.and_then(|i| match i {
+ sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
+ sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
+ sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
+ _ => None,
+ });
+ if let Some(intrinsic) = panic_intrinsic {
+ use AssertIntrinsic::*;
+
+ let ty = instance.unwrap().substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let do_panic = match intrinsic {
+ Inhabited => layout.abi.is_uninhabited(),
+ ZeroValid => !bx.tcx().permits_zero_init(layout),
+ UninitValid => !bx.tcx().permits_uninit_init(layout),
+ };
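+            // (E.g. `mem::zeroed::<&T>()` reaches this as `ZeroValid` with
+            // `do_panic == true`, since a reference may never be null.)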
+ if do_panic {
+ let msg_str = with_no_visible_paths!({
+ with_no_trimmed_paths!({
+ if layout.abi.is_uninhabited() {
+ // Use this error even for the other intrinsics as it is more precise.
+ format!("attempted to instantiate uninhabited type `{}`", ty)
+ } else if intrinsic == ZeroValid {
+ format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ } else {
+ format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ ty
+ )
+ }
+ })
+ });
+ let msg = bx.const_str(&msg_str);
+ let location = self.get_caller_location(bx, source_info).immediate();
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) =
+ common::build_langcall(bx, Some(source_info.span), LangItem::Panic);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(
+ self,
+ bx,
+ fn_abi,
+ llfn,
+ &[msg.0, msg.1, location],
+ target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
+ cleanup,
+ &[],
+ );
+ } else {
+                // A no-op.
+ let target = target.unwrap();
+ helper.funclet_br(self, bx, target)
+ }
+ true
+ } else {
+ false
+ }
+ }
+
+ fn codegen_call_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: mir::Place<'tcx>,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ fn_span: Span,
+ ) {
+ let source_info = terminator.source_info;
+ let span = source_info.span;
+
+ // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+ let callee = self.codegen_operand(&mut bx, func);
+
+ let (instance, mut llfn) = match *callee.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => (
+ Some(
+ ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(bx.tcx()),
+ ),
+ None,
+ ),
+ ty::FnPtr(_) => (None, Some(callee.immediate())),
+ _ => bug!("{} is not callable", callee.layout.ty),
+ };
+ let def = instance.map(|i| i.def);
+
+ if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
+ // Empty drop glue; a no-op.
+ let target = target.unwrap();
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // FIXME(eddyb) avoid computing this if possible, when `instance` is
+ // available - right now `sig` is only needed for getting the `abi`
+ // and figuring out how many extra args were passed to a C-variadic `fn`.
+ let sig = callee.layout.ty.fn_sig(bx.tcx());
+ let abi = sig.abi();
+
+        // Handle intrinsics (which old codegen wanted `Expr`s for) ourselves.
+ let intrinsic = match def {
+ Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
+ _ => None,
+ };
+
+ let extra_args = &args[sig.inputs().skip_binder().len()..];
+ let extra_args = bx.tcx().mk_type_list(extra_args.iter().map(|op_arg| {
+ let op_ty = op_arg.ty(self.mir, bx.tcx());
+ self.monomorphize(op_ty)
+ }));
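+        // (For a C-variadic call such as `printf(fmt, a, b)`, `extra_args`
+        // holds the monomorphized types of `a` and `b`, which are not part of
+        // the declared signature.)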
+
+ let fn_abi = match instance {
+ Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
+ None => bx.fn_abi_of_fn_ptr(sig, extra_args),
+ };
+
+ if intrinsic == Some(sym::transmute) {
+ if let Some(target) = target {
+ self.codegen_transmute(&mut bx, &args[0], destination);
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ // If we are trying to transmute to an uninhabited type,
+ // it is likely there is no allotted destination. In fact,
+ // transmuting to an uninhabited type is UB, which means
+ // we can do what we like. Here, we declare that transmuting
+ // into an uninhabited type is impossible, so anything following
+ // it must be unreachable.
+ assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
+ bx.unreachable();
+ }
+ return;
+ }
+
+ if self.codegen_panic_intrinsic(
+ &helper,
+ &mut bx,
+ intrinsic,
+ instance,
+ source_info,
+ target,
+ cleanup,
+ ) {
+ return;
+ }
+
+        // The arguments we'll be passing. Plus one to account for the out-pointer, if used.
+ let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+ let mut llargs = Vec::with_capacity(arg_count);
+
+ // Prepare the return value destination
+ let ret_dest = if target.is_some() {
+ let is_intrinsic = intrinsic.is_some();
+ self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+ } else {
+ ReturnDest::Nothing
+ };
+
+ if intrinsic == Some(sym::caller_location) {
+ if let Some(target) = target {
+ let location = self
+ .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+
+ if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+ location.val.store(&mut bx, tmp);
+ }
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+ helper.funclet_br(self, &mut bx, target);
+ }
+ return;
+ }
+
+ match intrinsic {
+ None | Some(sym::drop_in_place) => {}
+ Some(sym::copy_nonoverlapping) => unreachable!(),
+ Some(intrinsic) => {
+ let dest = match ret_dest {
+ _ if fn_abi.ret.is_indirect() => llargs[0],
+ ReturnDest::Nothing => {
+ bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+ }
+ ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+ ReturnDest::DirectOperand(_) => {
+ bug!("Cannot use direct operand with an intrinsic call")
+ }
+ };
+
+ let args: Vec<_> = args
+ .iter()
+ .enumerate()
+ .map(|(i, arg)| {
+ // The indices passed to simd_shuffle* in the
+ // third argument must be constant. This is
+ // checked by const-qualification, which also
+ // promotes any complex rvalues to constants.
+ if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if let mir::Operand::Constant(constant) = arg {
+ let c = self.eval_mir_constant(constant);
+ let (llval, ty) = self.simd_shuffle_indices(
+ &bx,
+ constant.span,
+ self.monomorphize(constant.ty()),
+ c,
+ );
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty),
+ };
+ } else {
+ span_bug!(span, "shuffle indices must be constant");
+ }
+ }
+
+ self.codegen_operand(&mut bx, arg)
+ })
+ .collect();
+
+ Self::codegen_intrinsic_call(
+ &mut bx,
+ *instance.as_ref().unwrap(),
+ &fn_abi,
+ &args,
+ dest,
+ span,
+ );
+
+ if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ }
+
+ if let Some(target) = target {
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ bx.unreachable();
+ }
+
+ return;
+ }
+ }
+
+ // Split the rust-call tupled arguments off.
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+ let (tup, args) = args.split_last().unwrap();
+ (args, Some(tup))
+ } else {
+ (args, None)
+ };
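+        // (E.g. a closure call is MIR-encoded as `f(self, (a, b))` with the
+        // `extern "rust-call"` ABI; `untuple` is the `(a, b)` operand, whose
+        // fields are flattened into individual ABI arguments further down.)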
+
+ let mut copied_constant_arguments = vec![];
+ 'make_args: for (i, arg) in first_args.iter().enumerate() {
+ let mut op = self.codegen_operand(&mut bx, arg);
+
+ if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+ if let Pair(..) = op.val {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+                                // We found the one non-zero-sized field that is
+                                // allowed; now find *its* non-zero-sized field,
+                                // or stop if it's a pointer.
+ op = field;
+ continue 'descend_newtypes;
+ }
+ }
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ match op.val {
+ Pair(data_ptr, meta) => {
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
+ }
+ other => bug!("expected a Pair, got {:?}", other),
+ }
+ } else if let Ref(data_ptr, Some(meta), _) = op.val {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ } else {
+ span_bug!(span, "can't codegen a virtual call on {:?}", op);
+ }
+ }
+
+ // The callee needs to own the argument memory if we pass it
+ // by-ref, so make a local copy of non-immediate constants.
+ match (arg, op.val) {
+ (&mir::Operand::Copy(_), Ref(_, None, _))
+ | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+ let tmp = PlaceRef::alloca(&mut bx, op.layout);
+ bx.lifetime_start(tmp.llval, tmp.layout.size);
+ op.val.store(&mut bx, tmp);
+ op.val = Ref(tmp.llval, None, tmp.align);
+ copied_constant_arguments.push(tmp);
+ }
+ _ => {}
+ }
+
+ self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+ }
+ let num_untupled = untuple.map(|tup| {
+ self.codegen_arguments_untupled(
+ &mut bx,
+ tup,
+ &mut llargs,
+ &fn_abi.args[first_args.len()..],
+ )
+ });
+
+ let needs_location =
+ instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+ if needs_location {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ first_args.len() + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
+ instance,
+ fn_span,
+ fn_abi,
+ );
+ let location =
+ self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ debug!(
+ "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
+ terminator, location, fn_span
+ );
+
+ let last_arg = fn_abi.args.last().unwrap();
+ self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+ }
+
+ let (is_indirect_call, fn_ptr) = match (llfn, instance) {
+ (Some(llfn), _) => (true, llfn),
+ (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
+ _ => span_bug!(span, "no llfn for call"),
+ };
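+        // Only indirect calls (through a function pointer) need the CFI type
+        // test below; a direct call already names a known, resolved instance.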
+
+ // For backends that support CFI using type membership (i.e., testing whether a given
+ // pointer is associated with a type identifier).
+ if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
+ // Emit type metadata and checks.
+ // FIXME(rcvalle): Add support for generalized identifiers.
+ // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
+ let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
+ let typeid_metadata = self.cx.typeid_metadata(typeid);
+
+ // Test whether the function pointer is associated with the type identifier.
+ let cond = bx.type_test(fn_ptr, typeid_metadata);
+ let bb_pass = bx.append_sibling_block("type_test.pass");
+ let bb_fail = bx.append_sibling_block("type_test.fail");
+ bx.cond_br(cond, bb_pass, bb_fail);
+
+ bx.switch_to_block(bb_pass);
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+
+ bx.switch_to_block(bb_fail);
+ bx.abort();
+ bx.unreachable();
+
+ return;
+ }
+
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+ }
+
+ fn codegen_asm_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ template: &[ast::InlineAsmTemplatePiece],
+ operands: &[mir::InlineAsmOperand<'tcx>],
+ options: ast::InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ let span = terminator.source_info.span;
+
+ let operands: Vec<_> = operands
+ .iter()
+ .map(|op| match *op {
+ mir::InlineAsmOperand::In { reg, ref value } => {
+ let value = self.codegen_operand(&mut bx, value);
+ InlineAsmOperandRef::In { reg, value }
+ }
+ mir::InlineAsmOperand::Out { reg, late, ref place } => {
+ let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+ InlineAsmOperandRef::Out { reg, late, place }
+ }
+ mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
+ let in_value = self.codegen_operand(&mut bx, in_value);
+ let out_place =
+ out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
+ }
+ mir::InlineAsmOperand::Const { ref value } => {
+ let const_value = self
+ .eval_mir_constant(value)
+ .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+ let string = common::asm_const_to_str(
+ bx.tcx(),
+ span,
+ const_value,
+ bx.layout_of(value.ty()),
+ );
+ InlineAsmOperandRef::Const { string }
+ }
+ mir::InlineAsmOperand::SymFn { ref value } => {
+ let literal = self.monomorphize(value.literal);
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap();
+ InlineAsmOperandRef::SymFn { instance }
+ } else {
+ span_bug!(span, "invalid type for asm sym (fn)");
+ }
+ }
+ mir::InlineAsmOperand::SymStatic { def_id } => {
+ InlineAsmOperandRef::SymStatic { def_id }
+ }
+ })
+ .collect();
+
+ helper.do_inlineasm(
+ self,
+ &mut bx,
+ template,
+ &operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ instance,
+ );
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
+ let llbb = self.llbb(bb);
+ let mut bx = Bx::build(self.cx, llbb);
+ let mir = self.mir;
+ let data = &mir[bb];
+
+ debug!("codegen_block({:?}={:?})", bb, data);
+
+ for statement in &data.statements {
+ bx = self.codegen_statement(bx, statement);
+ }
+
+ self.codegen_terminator(bx, bb, data.terminator());
+ }
+
+ fn codegen_terminator(
+ &mut self,
+ mut bx: Bx,
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ ) {
+ debug!("codegen_terminator: {:?}", terminator);
+
+ // Create the cleanup bundle, if needed.
+ let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+ let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+
+ self.set_debug_loc(&mut bx, terminator.source_info);
+ match terminator.kind {
+ mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+
+ mir::TerminatorKind::Abort => {
+ self.codegen_abort_terminator(helper, bx, terminator);
+ }
+
+ mir::TerminatorKind::Goto { target } => {
+ helper.funclet_br(self, &mut bx, target);
+ }
+
+ mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+ self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+ }
+
+ mir::TerminatorKind::Return => {
+ self.codegen_return_terminator(bx);
+ }
+
+ mir::TerminatorKind::Unreachable => {
+ bx.unreachable();
+ }
+
+ mir::TerminatorKind::Drop { place, target, unwind } => {
+ self.codegen_drop_terminator(helper, bx, place, target, unwind);
+ }
+
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+ self.codegen_assert_terminator(
+ helper, bx, terminator, cond, expected, msg, target, cleanup,
+ );
+ }
+
+ mir::TerminatorKind::DropAndReplace { .. } => {
+ bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+ }
+
+ mir::TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ cleanup,
+ from_hir_call: _,
+ fn_span,
+ } => {
+ self.codegen_call_terminator(
+ helper,
+ bx,
+ terminator,
+ func,
+ args,
+ destination,
+ target,
+ cleanup,
+ fn_span,
+ );
+ }
+ mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
+ bug!("generator ops in codegen")
+ }
+ mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
+ bug!("borrowck false edges in codegen")
+ }
+
+ mir::TerminatorKind::InlineAsm {
+ template,
+ ref operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ } => {
+ self.codegen_asm_terminator(
+ helper,
+ bx,
+ terminator,
+ template,
+ operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ self.instance,
+ );
+ }
+ }
+ }
+
+ fn codegen_argument(
+ &mut self,
+ bx: &mut Bx,
+ op: OperandRef<'tcx, Bx::Value>,
+ llargs: &mut Vec<Bx::Value>,
+ arg: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) {
+ // Fill padding with undef value, where applicable.
+ if let Some(ty) = arg.pad {
+ llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
+ }
+
+ if arg.is_ignore() {
+ return;
+ }
+
+ if let PassMode::Pair(..) = arg.mode {
+ match op.val {
+ Pair(a, b) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
+ }
+ } else if arg.is_unsized_indirect() {
+ match op.val {
+ Ref(a, Some(b), _) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
+ }
+ }
+
+ // Force by-ref if we have to load through a cast pointer.
+ let (mut llval, align, by_ref) = match op.val {
+ Immediate(_) | Pair(..) => match arg.mode {
+ PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
+ },
+ Ref(llval, _, align) => {
+ if arg.is_indirect() && align < arg.layout.align.abi {
+                // `foo(packed.large_field)`. We can't pass the (unaligned) field
+                // directly. We think that ATM (Rust 1.16) we only pass temporaries
+                // here, but we shouldn't leave scary latent bugs around.
+
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
+ }
+ };
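+        // At this point `llval` is either the immediate value itself, or (if
+        // `by_ref`) a pointer to a sufficiently aligned stack slot holding it.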
+
+ if by_ref && !arg.is_indirect() {
+ // Have to load the argument, maybe while casting it.
+ if let PassMode::Cast(ty) = arg.mode {
+ let llty = bx.cast_backend_type(&ty);
+ let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+ llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ } else {
+ // We can't use `PlaceRef::load` here because the argument
+ // may have a type we don't treat as immediate, but the ABI
+ // used for this call is passing it by-value. In that case,
+ // the load would just produce `OperandValue::Ref` instead
+ // of the `OperandValue::Immediate` we need for the call.
+ llval = bx.load(bx.backend_type(arg.layout), llval, align);
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if scalar.is_bool() {
+ bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
+ }
+ }
+ // We store bools as `i8` so we need to truncate to `i1`.
+ llval = bx.to_immediate(llval, arg.layout);
+ }
+ }
+
+ llargs.push(llval);
+ }
+
+ fn codegen_arguments_untupled(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ llargs: &mut Vec<Bx::Value>,
+ args: &[ArgAbi<'tcx, Ty<'tcx>>],
+ ) -> usize {
+ let tuple = self.codegen_operand(bx, operand);
+
+ // Handle both by-ref and immediate tuples.
+ if let Ref(llval, None, align) = tuple.val {
+ let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+ for i in 0..tuple.layout.fields.count() {
+ let field_ptr = tuple_ptr.project_field(bx, i);
+ let field = bx.load_operand(field_ptr);
+ self.codegen_argument(bx, field, llargs, &args[i]);
+ }
+ } else if let Ref(_, Some(_), _) = tuple.val {
+ bug!("closure arguments must be sized")
+ } else {
+ // If the tuple is immediate, the elements are as well.
+ for i in 0..tuple.layout.fields.count() {
+ let op = tuple.extract_field(bx, i);
+ self.codegen_argument(bx, op, llargs, &args[i]);
+ }
+ }
+ tuple.layout.fields.count()
+ }
+
+ fn get_caller_location(
+ &mut self,
+ bx: &mut Bx,
+ mut source_info: mir::SourceInfo,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let tcx = bx.tcx();
+
+ let mut span_to_caller_location = |span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = tcx.const_caller_location((
+ Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
+ };
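+        // (The operand built here is the `&'static core::panic::Location`
+        // that `#[track_caller]` callees receive as a hidden final argument:
+        // a (file, line, column) triple describing the caller.)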
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(tcx) {
+ return span_to_caller_location(source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
+ }
+
+ fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = bx.cx();
+ if let Some(slot) = self.personality_slot {
+ slot
+ } else {
+ let layout = cx.layout_of(
+ cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+ );
+ let slot = PlaceRef::alloca(bx, layout);
+ self.personality_slot = Some(slot);
+ slot
+ }
+ }
+
+ /// Returns the landing/cleanup pad wrapper around the given basic block.
+ // FIXME(eddyb) rename this to `eh_pad_for`.
+ fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ if let Some(landing_pad) = self.landing_pads[bb] {
+ return landing_pad;
+ }
+
+ let landing_pad = self.landing_pad_for_uncached(bb);
+ self.landing_pads[bb] = Some(landing_pad);
+ landing_pad
+ }
+
+ // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
+ fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ let llbb = self.llbb(bb);
+ if base::wants_msvc_seh(self.cx.sess()) {
+ let funclet;
+ let ret_llbb;
+ match self.mir[bb].terminator.as_ref().map(|t| &t.kind) {
+ // This is a basic block that we're aborting the program for,
+ // notably in an `extern` function. These basic blocks are inserted
+            // so that we assert that `extern` functions indeed do not panic,
+ // and if they do we abort the process.
+ //
+ // On MSVC these are tricky though (where we're doing funclets). If
+ // we were to do a cleanuppad (like below) the normal functions like
+ // `longjmp` would trigger the abort logic, terminating the
+ // program. Instead we insert the equivalent of `catch(...)` for C++
+            // which magically doesn't trigger when `longjmp` flies over this
+ // frame.
+ //
+ // Lots more discussion can be found on #48251 but this codegen is
+ // modeled after clang's for:
+ //
+ // try {
+ // foo();
+ // } catch (...) {
+ // bar();
+ // }
+ Some(&mir::TerminatorKind::Abort) => {
+ let cs_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
+ let cp_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
+ ret_llbb = cs_bb;
+
+ let mut cs_bx = Bx::build(self.cx, cs_bb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+
+ // The "null" here is actually a RTTI type descriptor for the
+ // C++ personality function, but `catch (...)` has no type so
+ // it's null. The 64 here is actually a bitfield which
+ // represents that this is a catch-all block.
+ let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let null = cp_bx.const_null(
+ cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
+ );
+ let sixty_four = cp_bx.const_i32(64);
+ funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
+ cp_bx.br(llbb);
+ }
+ _ => {
+ let cleanup_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ ret_llbb = cleanup_bb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ funclet = cleanup_bx.cleanup_pad(None, &[]);
+ cleanup_bx.br(llbb);
+ }
+ }
+ self.funclets[bb] = Some(funclet);
+ ret_llbb
+ } else {
+ let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut bx = Bx::build(self.cx, bb);
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let slot = self.get_personality_slot(&mut bx);
+ slot.storage_live(&mut bx);
+ Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
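+            // The two values extracted from the landing pad are the exception
+            // pointer and selector produced by the personality function; they
+            // are kept in the personality slot until the matching resume.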
+
+ bx.br(llbb);
+ bx.llbb()
+ }
+ }
+
+ fn landing_pad_type(&self) -> Bx::Type {
+ let cx = self.cx;
+ cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
+ }
+
+ fn unreachable_block(&mut self) -> Bx::BasicBlock {
+ self.unreachable_block.unwrap_or_else(|| {
+ let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
+ let mut bx = Bx::build(self.cx, llbb);
+ bx.unreachable();
+ self.unreachable_block = Some(llbb);
+ llbb
+ })
+ }
+
+ fn double_unwind_guard(&mut self) -> Bx::BasicBlock {
+ self.double_unwind_guard.unwrap_or_else(|| {
+ assert!(!base::wants_msvc_seh(self.cx.sess()));
+
+ let llbb = Bx::append_block(self.cx, self.llfn, "abort");
+ let mut bx = Bx::build(self.cx, llbb);
+ self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let llret = bx.call(fn_ty, fn_ptr, &[], None);
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ bx.do_not_inline(llret);
+
+ bx.unreachable();
+
+ self.double_unwind_guard = Some(llbb);
+ llbb
+ })
+ }
+
+ /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
+ /// cached in `self.cached_llbbs`, or created on demand (and cached).
+ // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
+ pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ self.cached_llbbs[bb].unwrap_or_else(|| {
+ // FIXME(eddyb) only name the block if `fewer_names` is `false`.
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ self.cached_llbbs[bb] = Some(llbb);
+ llbb
+ })
+ }
+
+ fn make_return_dest(
+ &mut self,
+ bx: &mut Bx,
+ dest: mir::Place<'tcx>,
+ fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
+ llargs: &mut Vec<Bx::Value>,
+ is_intrinsic: bool,
+ ) -> ReturnDest<'tcx, Bx::Value> {
+ // If the return is ignored, we can just return a do-nothing `ReturnDest`.
+ if fn_ret.is_ignore() {
+ return ReturnDest::Nothing;
+ }
+ let dest = if let Some(index) = dest.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(dest) => dest,
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ LocalRef::Operand(None) => {
+ // Handle temporary places, specifically `Operand` ones, as
+ // they don't have `alloca`s.
+ return if fn_ret.is_indirect() {
+                        // Odd, but possible, case: we have an operand temporary,
+ // but the calling convention has an indirect return.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ llargs.push(tmp.llval);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else if is_intrinsic {
+ // Currently, intrinsics always need a location to store
+ // the result, so we create a temporary `alloca` for the
+ // result.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else {
+ ReturnDest::DirectOperand(index)
+ };
+ }
+ LocalRef::Operand(Some(_)) => {
+ bug!("place local already assigned to");
+ }
+ }
+ } else {
+ self.codegen_place(
+ bx,
+ mir::PlaceRef { local: dest.local, projection: &dest.projection },
+ )
+ };
+ if fn_ret.is_indirect() {
+ if dest.align < dest.layout.align.abi {
+ // Currently, MIR code generation does not create calls
+ // that store directly to fields of packed structs (in
+ // fact, the calls it creates write only to temps).
+ //
+ // If someone changes that, please update this code path
+ // to create a temporary.
+ span_bug!(self.mir.span, "can't directly store to unaligned value");
+ }
+ llargs.push(dest.llval);
+ ReturnDest::Nothing
+ } else {
+ ReturnDest::Store(dest)
+ }
+ }
+
+ fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
+ if let Some(index) = dst.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+ LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
+ LocalRef::Operand(None) => {
+ let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
+ assert!(!dst_layout.ty.has_erasable_regions());
+ let place = PlaceRef::alloca(bx, dst_layout);
+ place.storage_live(bx);
+ self.codegen_transmute_into(bx, src, place);
+ let op = bx.load_operand(place);
+ place.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ LocalRef::Operand(Some(op)) => {
+                    assert!(op.layout.is_zst(), "assigning to initialized SSA temp");
+ }
+ }
+ } else {
+ let dst = self.codegen_place(bx, dst.as_ref());
+ self.codegen_transmute_into(bx, src, dst);
+ }
+ }
+
+ fn codegen_transmute_into(
+ &mut self,
+ bx: &mut Bx,
+ src: &mir::Operand<'tcx>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+ ) {
+ let src = self.codegen_operand(bx, src);
+
+ // Special-case transmutes between scalars as simple bitcasts.
+ match (src.layout.abi, dst.layout.abi) {
+ (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
+ // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
+ if (src_scalar.primitive() == abi::Pointer)
+ == (dst_scalar.primitive() == abi::Pointer)
+ {
+ assert_eq!(src.layout.size, dst.layout.size);
+
+ // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
+ // conversions allow handling `bool`s the same as `u8`s.
+ let src = bx.from_immediate(src.immediate());
+ let src_as_dst = bx.bitcast(src, bx.backend_type(dst.layout));
+ Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ let llty = bx.backend_type(src.layout);
+ let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+ let align = src.layout.align.abi.min(dst.align);
+ src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+ }
+
+    // Stores the return value of a function call into its final location.
+ fn store_return(
+ &mut self,
+ bx: &mut Bx,
+ dest: ReturnDest<'tcx, Bx::Value>,
+ ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ llval: Bx::Value,
+ ) {
+ use self::ReturnDest::*;
+
+ match dest {
+ Nothing => (),
+ Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+ IndirectOperand(tmp, index) => {
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ DirectOperand(index) => {
+ // If there is a cast, we have to store and reload.
+ let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let tmp = PlaceRef::alloca(bx, ret_abi.layout);
+ tmp.storage_live(bx);
+ bx.store_arg(&ret_abi, llval, tmp);
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ op
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
+ };
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ }
+ }
+}
+
+enum ReturnDest<'tcx, V> {
+    /// Do nothing; the return value is indirect or ignored.
+    Nothing,
+    /// Store the return value to the pointer.
+    Store(PlaceRef<'tcx, V>),
+    /// Store an indirect return value to an operand local place.
+    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
+    /// Store a direct return value to an operand local place.
+    DirectOperand(mir::Local),
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
new file mode 100644
index 000000000..9a995fbf6
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -0,0 +1,90 @@
+use crate::mir::operand::OperandRef;
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn eval_mir_constant_to_operand(
+ &self,
+ bx: &mut Bx,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
+ let val = self.eval_mir_constant(constant)?;
+ let ty = self.monomorphize(constant.ty());
+ Ok(OperandRef::from_const(bx, val, ty))
+ }
+
+ pub fn eval_mir_constant(
+ &self,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+ let ct = self.monomorphize(constant.literal);
+ let ct = match ct {
+ mir::ConstantKind::Ty(ct) => ct,
+ mir::ConstantKind::Val(val, _) => return Ok(val),
+ };
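+        // `ConstantKind::Val` was already fully evaluated; only `Ty` constants
+        // still need const-eval (if unevaluated) or valtree-to-value conversion.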
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(ct) => self
+ .cx
+ .tcx()
+ .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
+ .map_err(|err| {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ err
+ }),
+ ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ }
+ }
+
+    /// Process a constant containing SIMD shuffle indices.
+ pub fn simd_shuffle_indices(
+ &mut self,
+ bx: &Bx,
+ span: Span,
+ ty: Ty<'tcx>,
+ constant: Result<ConstValue<'tcx>, ErrorHandled>,
+ ) -> (Bx::Value, Ty<'tcx>) {
+ constant
+ .map(|val| {
+ let field_ty = ty.builtin_index().unwrap();
+ let c = mir::ConstantKind::from_value(val, ty);
+ let values: Vec<_> = bx
+ .tcx()
+ .destructure_mir_constant(ty::ParamEnv::reveal_all(), c)
+ .fields
+ .iter()
+ .map(|field| {
+ if let Some(prim) = field.try_to_scalar() {
+ let layout = bx.layout_of(field_ty);
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
+ } else {
+ bug!("simd shuffle field {:?}", field)
+ }
+ })
+ .collect();
+ let llval = bx.const_struct(&values, false);
+ (llval, c.ty())
+ })
+ .unwrap_or_else(|_| {
+ bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time");
+ // We've errored, so we don't have to produce working code.
+ let ty = self.monomorphize(ty);
+ let llty = bx.backend_type(bx.layout_of(ty));
+ (bx.const_undef(llty), ty)
+ })
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
new file mode 100644
index 000000000..f1fe49528
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -0,0 +1,55 @@
+use crate::traits::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::Coverage;
+use rustc_middle::mir::SourceScope;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+ // Determine the instance that coverage data was originally generated for.
+ let instance = if let Some(inlined) = scope.inlined_instance(&self.mir.source_scopes) {
+ self.monomorphize(inlined)
+ } else {
+ self.instance
+ };
+
+ let Coverage { kind, code_region } = coverage;
+ match kind {
+ CoverageKind::Counter { function_source_hash, id } => {
+ if bx.set_function_source_hash(instance, function_source_hash) {
+ // If `set_function_source_hash()` returned true, the coverage map is enabled,
+ // so continue adding the counter.
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
+ bx.add_coverage_counter(instance, id, code_region);
+ }
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.zero_based_index());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ }
+ }
+ CoverageKind::Expression { id, lhs, op, rhs } => {
+ bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ }
+ CoverageKind::Unreachable => {
+ bx.add_coverage_unreachable(
+ instance,
+ code_region.expect("unreachable regions always have code regions"),
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
new file mode 100644
index 000000000..8c3186efc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -0,0 +1,418 @@
+use crate::traits::*;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::abi::Abi;
+use rustc_target::abi::Size;
+
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+pub struct FunctionDebugContext<S, L> {
+ pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
+}
+
+#[derive(Copy, Clone)]
+pub enum VariableKind {
+ ArgumentVariable(usize /*index*/),
+ LocalVariable,
+}
+
+/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
+#[derive(Copy, Clone)]
+pub struct PerLocalVarDebugInfo<'tcx, D> {
+ pub name: Symbol,
+ pub source_info: mir::SourceInfo,
+
+ /// `DIVariable` returned by `create_dbg_var`.
+ pub dbg_var: Option<D>,
+
+ /// `.place.projection` from `mir::VarDebugInfo`.
+ pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct DebugScope<S, L> {
+ pub dbg_scope: S,
+
+ /// Call site location, if this scope was inlined from another function.
+ pub inlined_at: Option<L>,
+
+    /// Start and end offsets of the file to which this DIScope belongs.
+    /// These are used to quickly determine whether some span refers to the same file.
+ pub file_start_pos: BytePos,
+ pub file_end_pos: BytePos,
+}
+
+impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
+    /// DILocations inherit the source file name from the parent DIScope. Due to
+    /// macro expansions it may happen that the current span belongs to a different
+    /// file than the DIScope corresponding to the span's containing source scope.
+    /// If so, we need to create a DIScope "extension" into that file.
+ pub fn adjust_dbg_scope_for_span<Cx: CodegenMethods<'tcx, DIScope = S, DILocation = L>>(
+ &self,
+ cx: &Cx,
+ span: Span,
+ ) -> S {
+ let pos = span.lo();
+ if pos < self.file_start_pos || pos >= self.file_end_pos {
+ let sm = cx.sess().source_map();
+ cx.extend_scope_to_file(self.dbg_scope, &sm.lookup_char_pos(pos).file)
+ } else {
+ self.dbg_scope
+ }
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
+ bx.set_span(source_info.span);
+ if let Some(dbg_loc) = self.dbg_loc(source_info) {
+ bx.set_dbg_loc(dbg_loc);
+ }
+ }
+
+ fn dbg_loc(&self, source_info: mir::SourceInfo) -> Option<Bx::DILocation> {
+ let (dbg_scope, inlined_at, span) = self.adjusted_span_and_dbg_scope(source_info)?;
+ Some(self.cx.dbg_loc(dbg_scope, inlined_at, span))
+ }
+
+ fn adjusted_span_and_dbg_scope(
+ &self,
+ source_info: mir::SourceInfo,
+ ) -> Option<(Bx::DIScope, Option<Bx::DILocation>, Span)> {
+ let span = self.adjust_span_for_debugging(source_info.span);
+ let scope = &self.debug_context.as_ref()?.scopes[source_info.scope];
+ Some((scope.adjust_dbg_scope_for_span(self.cx, span), scope.inlined_at, span))
+ }
+
+    /// In order to have good line-stepping behavior in the debugger, we overwrite
+    /// the debug locations of macro expansions with those of the outermost expansion
+    /// site (unless the crate is being compiled with `-Z debug-macros`).
+ fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
+ // Bail out if debug info emission is not enabled.
+ if self.debug_context.is_none() {
+ return span;
+ }
+
+ if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ // Use span of the outermost expansion site, while keeping the original lexical scope.
+ span = rustc_span::hygiene::walk_chain(span, self.mir.span.ctxt());
+ }
+
+ span
+ }
+
+ fn spill_operand_to_stack(
+ operand: &OperandRef<'tcx, Bx::Value>,
+ name: Option<String>,
+ bx: &mut Bx,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ // "Spill" the value onto the stack, for debuginfo,
+ // without forcing non-debuginfo uses of the local
+ // to also load from the stack every single time.
+ // FIXME(#68817) use `llvm.dbg.value` instead,
+ // at least for the cases which LLVM handles correctly.
+ let spill_slot = PlaceRef::alloca(bx, operand.layout);
+ if let Some(name) = name {
+ bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+ }
+ operand.val.store(bx, spill_slot);
+ spill_slot
+ }
+
+ /// Apply debuginfo and/or name, after creating the `alloca` for a local,
+ /// or initializing the local with an operand (whichever applies).
+ pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
+ let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
+
+ // FIXME(eddyb) maybe name the return place as `_0` or `return`?
+ if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
+ {
+ return;
+ }
+
+ let vars = match &self.per_local_var_debug_info {
+ Some(per_local) => &per_local[local],
+ None => return,
+ };
+ let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+ let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
+
+ let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
+ let arg_index = local.index() - 1;
+
+ // Add debuginfo even to unnamed arguments.
+ // FIXME(eddyb) is this really needed?
+ if arg_index == 0 && has_proj() {
+ // Hide closure environments from debuginfo.
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices
+ // be offset to account for the hidden environment?
+ None
+ } else if whole_local_var.is_some() {
+ // No need to make up anything, there is a `mir::VarDebugInfo`
+ // covering the whole local.
+ // FIXME(eddyb) take `whole_local_var.source_info.scope` into
+ // account, just in case it doesn't use `ArgumentVariable`
+ // (after #67586 gets fixed).
+ None
+ } else {
+ let name = kw::Empty;
+ let decl = &self.mir.local_decls[local];
+ let dbg_var = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(decl.source_info).map(
+ |(dbg_scope, _, span)| {
+ // FIXME(eddyb) is this `+ 1` needed at all?
+ let kind = VariableKind::ArgumentVariable(arg_index + 1);
+
+ let arg_ty = self.monomorphize(decl.ty);
+
+ self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span)
+ },
+ )
+ } else {
+ None
+ };
+
+ Some(PerLocalVarDebugInfo {
+ name,
+ source_info: decl.source_info,
+ dbg_var,
+ projection: ty::List::empty(),
+ })
+ }
+ } else {
+ None
+ };
+
+ let local_ref = &self.locals[local];
+
+ let name = if bx.sess().fewer_names() {
+ None
+ } else {
+ Some(match whole_local_var.or(fallback_var) {
+ Some(var) if var.name != kw::Empty => var.name.to_string(),
+ _ => format!("{:?}", local),
+ })
+ };
+
+ if let Some(name) = &name {
+ match local_ref {
+ LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
+ bx.set_var_name(place.llval, name);
+ }
+ LocalRef::Operand(Some(operand)) => match operand.val {
+ OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+ bx.set_var_name(x, name);
+ }
+ OperandValue::Pair(a, b) => {
+ // FIXME(eddyb) these are scalar components,
+ // maybe extract the high-level fields?
+ bx.set_var_name(a, &(name.clone() + ".0"));
+ bx.set_var_name(b, &(name.clone() + ".1"));
+ }
+ },
+ LocalRef::Operand(None) => {}
+ }
+ }
+
+ if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
+ return;
+ }
+
+ let base = match local_ref {
+ LocalRef::Operand(None) => return,
+
+ LocalRef::Operand(Some(operand)) => {
+ // Don't spill operands onto the stack in naked functions.
+ // See: https://github.com/rust-lang/rust/issues/42779
+ let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
+ if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ return;
+ }
+
+ Self::spill_operand_to_stack(operand, name, bx)
+ }
+
+ LocalRef::Place(place) => *place,
+
+ // FIXME(eddyb) add debuginfo for unsized places too.
+ LocalRef::UnsizedPlace(_) => return,
+ };
+
+ let vars = vars.iter().copied().chain(fallback_var);
+
+ for var in vars {
+ let Some(dbg_var) = var.dbg_var else { continue };
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ let mut direct_offset = Size::ZERO;
+ // FIXME(eddyb) use smallvec here.
+ let mut indirect_offsets = vec![];
+ let mut place = base;
+
+ for elem in &var.projection[..] {
+ match *elem {
+ mir::ProjectionElem::Deref => {
+ indirect_offsets.push(Size::ZERO);
+ place = bx.load_operand(place).deref(bx.cx());
+ }
+ mir::ProjectionElem::Field(field, _) => {
+ let i = field.index();
+ let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+ *offset += place.layout.fields.offset(i);
+ place = place.project_field(bx, i);
+ }
+ mir::ProjectionElem::Downcast(_, variant) => {
+ place = place.project_downcast(bx, variant);
+ }
+ _ => span_bug!(
+ var.source_info.span,
+ "unsupported var debuginfo place `{:?}`",
+ mir::Place { local, projection: var.projection },
+ ),
+ }
+ }
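+            // `direct_offset` plus `indirect_offsets` now describe where this
+            // variable fragment lives relative to `base`: a direct byte offset,
+            // with one additional entry (and an implied load) per `Deref`.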
+
+ // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
+ // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
+ // not DWARF and LLVM doesn't support translating the resulting
+ // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
+ // Creating extra allocas on the stack makes the resulting debug info simple enough
+ // that LLVM can generate correct CodeView records and thus the values appear in the
+ // debugger. (#83709)
+ let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
+ && self.mir.local_kind(local) == mir::LocalKind::Arg
+ // LLVM can handle simple things but anything more complex than just a direct
+ // offset or one indirect offset of 0 is too complex for it to generate CV records
+ // correctly.
+ && (direct_offset != Size::ZERO
+ || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
+
+ if should_create_individual_allocas {
+ // Create a variable which will be a pointer to the actual value
+ let ptr_ty = bx.tcx().mk_ty(ty::RawPtr(ty::TypeAndMut {
+ mutbl: mir::Mutability::Mut,
+ ty: place.layout.ty,
+ }));
+ let ptr_layout = bx.layout_of(ptr_ty);
+ let alloca = PlaceRef::alloca(bx, ptr_layout);
+ bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+
+ // Write the pointer to the variable
+ bx.store(place.llval, alloca.llval, alloca.align);
+
+ // Point the debug info to `*alloca` for the current variable
+ bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]);
+ } else {
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+ }
+ }
+ }
+
+ pub fn debug_introduce_locals(&self, bx: &mut Bx) {
+ if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
+ for local in self.locals.indices() {
+ self.debug_introduce_local(bx, local);
+ }
+ }
+ }
+
+ /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
+ pub fn compute_per_local_var_debug_info(
+ &self,
+ bx: &mut Bx,
+ ) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
+ let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+
+ let target_is_msvc = self.cx.sess().target.is_like_msvc;
+
+ if !full_debug_info && self.cx.sess().fewer_names() {
+ return None;
+ }
+
+ let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
+ for var in &self.mir.var_debug_info {
+ let dbg_scope_and_span = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(var.source_info)
+ } else {
+ None
+ };
+
+ let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
+ let (var_ty, var_kind) = match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ let var_ty = self.monomorphized_place_ty(place.as_ref());
+ let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+ && place.projection.is_empty()
+ && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+ {
+ let arg_index = place.local.index() - 1;
+ if target_is_msvc {
+ // ScalarPair parameters are spilled to the stack so they need to
+ // be marked as a `LocalVariable` for MSVC debuggers to visualize
+ // their data correctly. (See #81894 & #88625)
+ let var_ty_layout = self.cx.layout_of(var_ty);
+ if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+ VariableKind::LocalVariable
+ } else {
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ // Also, is this `+ 1` needed at all?
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ VariableKind::LocalVariable
+ };
+ (var_ty, var_kind)
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ let ty = self.monomorphize(c.ty());
+ (ty, VariableKind::LocalVariable)
+ }
+ };
+
+ self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
+ });
+
+ match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ per_local[place.local].push(PerLocalVarDebugInfo {
+ name: var.name,
+ source_info: var.source_info,
+ dbg_var,
+ projection: place.projection,
+ });
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ if let Some(dbg_var) = dbg_var {
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
+ let base = Self::spill_operand_to_stack(
+ &operand,
+ Some(var.name.to_string()),
+ bx,
+ );
+
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]);
+ }
+ }
+ }
+ }
+ }
+ Some(per_local)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
new file mode 100644
index 000000000..94ac71a4d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -0,0 +1,636 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::FunctionCx;
+use crate::common::{span_invalid_monomorphization_error, IntPredicate};
+use crate::glue;
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{sym, Span};
+use rustc_target::abi::{
+ call::{FnAbi, PassMode},
+ WrappingRange,
+};
+
+fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ allow_overlap: bool,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ src: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
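+    // `size` is now the total byte count, i.e. `size_of::<T>() * count`; this
+    // helper backs `ptr::copy{,_nonoverlapping}`, with `allow_overlap`
+    // selecting memmove vs. memcpy.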
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ if allow_overlap {
+ bx.memmove(dst, align, src, align, size, flags);
+ } else {
+ bx.memcpy(dst, align, src, align, size, flags);
+ }
+}
+
+fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ val: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ bx.memset(dst, val, size, align, flags);
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_intrinsic_call(
+ bx: &mut Bx,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Bx::Value>],
+ llresult: Bx::Value,
+ span: Span,
+ ) {
+ let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
+
+ let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ bug!("expected fn item type, found {}", callee_ty);
+ };
+
+ let sig = callee_ty.fn_sig(bx.tcx());
+ let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = bx.tcx().item_name(def_id);
+ let name_str = name.as_str();
+
+ let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let llval = match name {
+ sym::assume => {
+ bx.assume(args[0].immediate());
+ return;
+ }
+ sym::abort => {
+ bx.abort();
+ return;
+ }
+
+ sym::va_start => bx.va_start(args[0].immediate()),
+ sym::va_end => bx.va_end(args[0].immediate()),
+ sym::size_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llsize
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).size.bytes())
+ }
+ }
+ sym::min_align_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llalign
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
+ }
+ }
+ sym::vtable_size | sym::vtable_align => {
+ let vtable = args[0].immediate();
+ let idx = match name {
+ sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
+ sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
+ _ => bug!(),
+ };
+ let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
+ if name == sym::vtable_align {
+ // Alignment is always nonzero.
+ bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
+ };
+ value
+ }
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ let value = bx
+ .tcx()
+ .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
+ .unwrap();
+ OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
+ }
+ sym::offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
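+                // Note: the only difference from `arith_offset` below is the
+                // `inbounds` flag on the GEP, which lets the backend assume the
+                // result stays within the same allocation.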
+ }
+ sym::arith_offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.gep(bx.backend_type(layout), ptr, &[offset])
+ }
+ sym::copy => {
+ copy_intrinsic(
+ bx,
+ true,
+ false,
+ substs.type_at(0),
+ args[1].immediate(),
+ args[0].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::write_bytes => {
+ memset_intrinsic(
+ bx,
+ false,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+
+ sym::volatile_copy_nonoverlapping_memory => {
+ copy_intrinsic(
+ bx,
+ false,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_copy_memory => {
+ copy_intrinsic(
+ bx,
+ true,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_set_memory => {
+ memset_intrinsic(
+ bx,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.volatile_store(bx, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.unaligned_volatile_store(bx, dst);
+ return;
+ }
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow
+ | sym::unchecked_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr
+ | sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::exact_div => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, bx.tcx()) {
+ Some((_width, signed)) => match name {
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow => {
+ let op = match name {
+ sym::add_with_overflow => OverflowOp::Add,
+ sym::sub_with_overflow => OverflowOp::Sub,
+ sym::mul_with_overflow => OverflowOp::Mul,
+ _ => bug!(),
+ };
+ let (val, overflow) =
+ bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
+ // Convert `i1` to a `bool`, and write it to the out parameter
+ let val = bx.from_immediate(val);
+ let overflow = bx.from_immediate(overflow);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(overflow, dest.llval, dest.align);
+
+ return;
+ }
+ sym::exact_div => {
+ if signed {
+ bx.exactsdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.exactudiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_div => {
+ if signed {
+ bx.sdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.udiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_rem => {
+ if signed {
+ bx.srem(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.urem(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
+ sym::unchecked_shr => {
+ if signed {
+ bx.ashr(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.lshr(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_add => {
+ if signed {
+ bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_sub => {
+ if signed {
+ bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_usub(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_mul => {
+ if signed {
+ bx.unchecked_smul(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_umul(args[0].immediate(), args[1].immediate())
+ }
+ }
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ match float_type_width(arg_tys[0]) {
+ Some(_width) => match name {
+ sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
+ sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
+ sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
+ sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
+ sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic float type, found `{}`",
+ name, arg_tys[0]
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::float_to_int_unchecked => {
+ if float_type_width(arg_tys[0]).is_none() {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic float type, \
+ found `{}`",
+ arg_tys[0]
+ ),
+ );
+ return;
+ }
+ let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic integer type, \
+ found `{}`",
+ ret_ty
+ ),
+ );
+ return;
+ };
+ if signed {
+ bx.fptosi(args[0].immediate(), llret_ty)
+ } else {
+ bx.fptoui(args[0].immediate(), llret_ty)
+ }
+ }
+
+ sym::discriminant_value => {
+ if ret_ty.is_integral() {
+ args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
+ } else {
+ span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
+ }
+ }
+
+ sym::const_allocate => {
+ // returns a null pointer at runtime.
+ bx.const_null(bx.type_i8p())
+ }
+
+ sym::const_deallocate => {
+ // nop at runtime.
+ return;
+ }
+
+ // This requires that atomic intrinsics follow a specific naming pattern:
+ // "atomic_<operation>[_<ordering>]"
+ name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
+ use crate::common::AtomicOrdering::*;
+ use crate::common::{AtomicRmwBinOp, SynchronizationScope};
+
+ let Some((instruction, ordering)) = atomic.split_once('_') else {
+ bx.sess().fatal("Atomic intrinsic missing memory ordering");
+ };
+
+ let parse_ordering = |bx: &Bx, s| match s {
+ "unordered" => Unordered,
+ "relaxed" => Relaxed,
+ "acquire" => Acquire,
+ "release" => Release,
+ "acqrel" => AcquireRelease,
+ "seqcst" => SequentiallyConsistent,
+ _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
+ };
+
+ let invalid_monomorphization = |ty| {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ };
+
+ match instruction {
+ "cxchg" | "cxchgweak" => {
+ let Some((success, failure)) = ordering.split_once('_') else {
+ bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
+ };
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let weak = instruction == "cxchgweak";
+ let mut dst = args[0].immediate();
+ let mut cmp = args[1].immediate();
+ let mut src = args[2].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ dst = bx.pointercast(dst, ptr_llty);
+ cmp = bx.ptrtoint(cmp, bx.type_isize());
+ src = bx.ptrtoint(src, bx.type_isize());
+ }
+ let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
+ let val = bx.extract_value(pair, 0);
+ let success = bx.extract_value(pair, 1);
+ let val = bx.from_immediate(val);
+ let success = bx.from_immediate(success);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(success, dest.llval, dest.align);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "load" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let mut source = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first...
+ let llty = bx.type_isize();
+ let ptr_llty = bx.type_ptr_to(llty);
+ source = bx.pointercast(source, ptr_llty);
+ let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
+ // ... and then cast the result back to a pointer
+ bx.inttoptr(result, bx.backend_type(layout))
+ } else {
+ bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
+ }
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "store" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let size = bx.layout_of(ty).size;
+ let mut val = args[1].immediate();
+ let mut ptr = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "fence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
+ return;
+ }
+
+ "singlethreadfence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
+ return;
+ }
+
+ // These are all AtomicRMW ops
+ op => {
+ let atom_op = match op {
+ "xchg" => AtomicRmwBinOp::AtomicXchg,
+ "xadd" => AtomicRmwBinOp::AtomicAdd,
+ "xsub" => AtomicRmwBinOp::AtomicSub,
+ "and" => AtomicRmwBinOp::AtomicAnd,
+ "nand" => AtomicRmwBinOp::AtomicNand,
+ "or" => AtomicRmwBinOp::AtomicOr,
+ "xor" => AtomicRmwBinOp::AtomicXor,
+ "max" => AtomicRmwBinOp::AtomicMax,
+ "min" => AtomicRmwBinOp::AtomicMin,
+ "umax" => AtomicRmwBinOp::AtomicUMax,
+ "umin" => AtomicRmwBinOp::AtomicUMin,
+ _ => bx.sess().fatal("unknown atomic operation"),
+ };
+
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let mut ptr = args[0].immediate();
+ let mut val = args[1].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+ }
+ }
+
+ sym::nontemporal_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.nontemporal_store(bx, dst);
+ return;
+ }
+
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if name == sym::ptr_guaranteed_eq {
+ bx.icmp(IntPredicate::IntEQ, a, b)
+ } else {
+ bx.icmp(IntPredicate::IntNE, a, b)
+ }
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ let ty = substs.type_at(0);
+ let pointee_size = bx.layout_of(ty).size;
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ let a = bx.ptrtoint(a, bx.type_isize());
+ let b = bx.ptrtoint(b, bx.type_isize());
+ let pointee_size = bx.const_usize(pointee_size.bytes());
+ if name == sym::ptr_offset_from {
+ // This is the same sequence that Clang emits for pointer subtraction.
+ // It can be neither `nsw` nor `nuw` because the input is treated as
+ // unsigned but then the output is treated as signed, so neither works.
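+                // Illustrative lowering for `T = u32` (`pointee_size == 4`),
+                // roughly, in LLVM-like IR:
+                //     %d = sub %a, %b
+                //     %r = sdiv exact %d, 4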
+ let d = bx.sub(a, b);
+ // this is where the signed magic happens (notice the `s` in `exactsdiv`)
+ bx.exactsdiv(d, pointee_size)
+ } else {
+ // The `_unsigned` version knows the relative ordering of the pointers,
+ // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
+ let d = bx.unchecked_usub(a, b);
+ bx.exactudiv(d, pointee_size)
+ }
+ }
+
+ _ => {
+ // Need to use backend-specific things in the implementation.
+ bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
+ return;
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+ let ptr = bx.pointercast(result.llval, ptr_llty);
+ bx.store(llval, ptr, result.align);
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+ .val
+ .store(bx, result);
+ }
+ }
+ }
+}
+
+// Returns the width of an integer `Ty`, and whether it is signed.
+// Returns `None` if the type is not an integer.
+// FIXME: there are multiple versions of this function; investigate reusing
+// one of the existing ones.
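+// For example, `i32` yields `Some((32, true))` and `usize` on a 64-bit target
+// yields `Some((64, false))`; `f32` and other non-integer types yield `None`.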
+fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
+ }
+ ty::Uint(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
+ }
+ _ => None,
+ }
+}
+
+// Returns the width of a float Ty
+// Returns None if the type is not a float
+fn float_type_width(ty: Ty<'_>) -> Option<u64> {
+ match ty.kind() {
+ ty::Float(t) => Some(t.bit_width()),
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
new file mode 100644
index 000000000..8ee375fa9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -0,0 +1,410 @@
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable, TypeVisitable};
+use rustc_target::abi::call::{FnAbi, PassMode};
+
+use std::iter;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
+use self::place::PlaceRef;
+use rustc_middle::mir::traversal;
+
+use self::operand::{OperandRef, OperandValue};
+
+/// Master context for codegenning from MIR.
+pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ instance: Instance<'tcx>,
+
+ mir: &'tcx mir::Body<'tcx>,
+
+ debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
+
+ llfn: Bx::Function,
+
+ cx: &'a Bx::CodegenCx,
+
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+
+ /// When unwinding is initiated, we have to store this personality
+ /// value somewhere so that we can load it and re-use it in the
+ /// resume instruction. The personality is (afaik) some kind of
+ /// value used for C++ unwinding, which must filter by type: we
+ /// don't really care about it very much. Anyway, this value
+ /// contains an alloca into which the personality is stored and
+ /// then later loaded when generating the DIVERGE_BLOCK.
+ personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
+
+ /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
+ /// as-needed (e.g. RPO reaching it or another block branching to it).
+ // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
+ cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// The funclet status of each basic block
+ cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+
+ /// When targeting MSVC, this stores the cleanup info for each funclet BB.
+ /// This is initialized at the same time as the `landing_pads` entry for the
+ /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
+ funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+
+ /// This stores the cached landing/cleanup pad block for a given BB.
+ // FIXME(eddyb) rename this to `eh_pads`.
+ landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// Cached unreachable block
+ unreachable_block: Option<Bx::BasicBlock>,
+
+ /// Cached double unwind guarding block
+ double_unwind_guard: Option<Bx::BasicBlock>,
+
+ /// The location where each MIR arg/var/tmp/ret is stored. This is
+    /// usually a `PlaceRef` representing an alloca, but not always:
+ /// sometimes we can skip the alloca and just store the value
+ /// directly using an `OperandRef`, which makes for tighter LLVM
+ /// IR. The conditions for using an `OperandRef` are as follows:
+ ///
+ /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+ /// - the operand must never be referenced indirectly
+ /// - we should not take its address using the `&` operator
+ /// - nor should it appear in a place path like `tmp.a`
+ /// - the operand must be defined by an rvalue that can generate immediate
+ /// values
+ ///
+ /// Avoiding allocs can also be important for certain intrinsics,
+ /// notably `expect`.
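+    ///
+    /// For example (illustrative), a temporary holding an `i32` sum that is
+    /// only ever read back can stay an `OperandRef` immediate, while taking
+    /// `&tmp` anywhere forces it into an alloca-backed `PlaceRef`.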
+ locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+
+ /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
+    /// This is `None` if no variable debuginfo/names are needed.
+ per_local_var_debug_info:
+ Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
+
+ /// Caller location propagated if this function has `#[track_caller]`.
+ caller_location: Option<OperandRef<'tcx, Bx::Value>>,
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: Copy + TypeFoldable<'tcx>,
+ {
+ debug!("monomorphize: self.instance={:?}", self.instance);
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.cx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+}
+
+enum LocalRef<'tcx, V> {
+ Place(PlaceRef<'tcx, V>),
+ /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
+ /// `*p` is the fat pointer that references the actual unsized place.
+ /// Every time it is initialized, we have to reallocate the place
+ /// and update the fat pointer. That's the reason why it is indirect.
+ UnsizedPlace(PlaceRef<'tcx, V>),
+ Operand(Option<OperandRef<'tcx, V>>),
+}
+
+impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+ fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> LocalRef<'tcx, V> {
+ if layout.is_zst() {
+ // Zero-size temporaries aren't always initialized, which
+ // doesn't matter because they don't contain data, but
+ // we need something in the operand.
+ LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+ } else {
+ LocalRef::Operand(None)
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+#[instrument(level = "debug", skip(cx))]
+pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ instance: Instance<'tcx>,
+) {
+ assert!(!instance.substs.needs_infer());
+
+ let llfn = cx.get_fn(instance);
+
+ let mir = cx.tcx().instance_mir(instance.def);
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+ debug!("fn_abi: {:?}", fn_abi);
+
+ let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+
+ let start_llbb = Bx::append_block(cx, llfn, "start");
+ let mut bx = Bx::build(cx, start_llbb);
+
+ if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+ bx.set_personality_fn(cx.eh_personality());
+ }
+
+ let cleanup_kinds = analyze::cleanup_kinds(&mir);
+ let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
+ .basic_blocks()
+ .indices()
+ .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
+ .collect();
+
+ let mut fx = FunctionCx {
+ instance,
+ mir,
+ llfn,
+ fn_abi,
+ cx,
+ personality_slot: None,
+ cached_llbbs,
+ unreachable_block: None,
+ double_unwind_guard: None,
+ cleanup_kinds,
+ landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
+ funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+ locals: IndexVec::new(),
+ debug_context,
+ per_local_var_debug_info: None,
+ caller_location: None,
+ };
+
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+
+ // Evaluate all required consts; codegen later assumes that CTFE will never fail.
+ let mut all_consts_ok = true;
+ for const_ in &mir.required_consts {
+ if let Err(err) = fx.eval_mir_constant(const_) {
+ all_consts_ok = false;
+ match err {
+ // errored or at least linted
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
+ ErrorHandled::TooGeneric => {
+                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
+ }
+ }
+ }
+ }
+ if !all_consts_ok {
+ // We leave the IR in some half-built state here, and rely on this code not even being
+ // submitted to LLVM once an error was raised.
+ return;
+ }
+
+ let memory_locals = analyze::non_ssa_locals(&fx);
+
+ // Allocate variable and temp allocas
+ fx.locals = {
+ let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+
+ let mut allocate_local = |local| {
+ let decl = &mir.local_decls[local];
+ let layout = bx.layout_of(fx.monomorphize(decl.ty));
+ assert!(!layout.ty.has_erasable_regions());
+
+ if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
+ debug!("alloc: {:?} (return place) -> place", local);
+ let llretptr = bx.get_param(0);
+ return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+ }
+
+ if memory_locals.contains(local) {
+ debug!("alloc: {:?} -> place", local);
+ if layout.is_unsized() {
+ LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+ } else {
+ LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+ }
+ } else {
+ debug!("alloc: {:?} -> operand", local);
+ LocalRef::new_operand(&mut bx, layout)
+ }
+ };
+
+ let retptr = allocate_local(mir::RETURN_PLACE);
+ iter::once(retptr)
+ .chain(args.into_iter())
+ .chain(mir.vars_and_temps_iter().map(allocate_local))
+ .collect()
+ };
+
+ // Apply debuginfo to the newly allocated locals.
+ fx.debug_introduce_locals(&mut bx);
+
+ // Codegen the body of each block using reverse postorder
+ for (bb, _) in traversal::reverse_postorder(&mir) {
+ fx.codegen_block(bb);
+ }
+}
+
+/// Produces, for each argument, a `LocalRef` holding the argument's
+/// value: either a place backed by an alloca, or, for immediate
+/// arguments that never need memory, a direct operand.
+fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ memory_locals: &BitSet<mir::Local>,
+) -> Vec<LocalRef<'tcx, Bx::Value>> {
+ let mir = fx.mir;
+ let mut idx = 0;
+ let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
+
+ let mut num_untupled = None;
+
+ let args = mir
+ .args_iter()
+ .enumerate()
+ .map(|(arg_index, local)| {
+ let arg_decl = &mir.local_decls[local];
+
+ if Some(local) == mir.spread_arg {
+ // This argument (e.g., the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual LLVM function arguments.
+
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+ let ty::Tuple(tupled_arg_tys) = arg_ty.kind() else {
+ bug!("spread argument isn't a tuple?!");
+ };
+
+ let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ for i in 0..tupled_arg_tys.len() {
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+ let pr_field = place.project_field(bx, i);
+ bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
+ }
+ assert_eq!(
+ None,
+ num_untupled.replace(tupled_arg_tys.len()),
+                "replaced existing num_untupled"
+ );
+
+ return LocalRef::Place(place);
+ }
+
+ if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+
+ let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ bx.va_start(va_list.llval);
+
+ return LocalRef::Place(va_list);
+ }
+
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+
+ if !memory_locals.contains(local) {
+ // We don't have to cast or keep the argument in the alloca.
+ // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+ // of putting everything in allocas just so we can use llvm.dbg.declare.
+ let local = |op| LocalRef::Operand(Some(op));
+ match arg.mode {
+ PassMode::Ignore => {
+ return local(OperandRef::new_zst(bx, arg.layout));
+ }
+ PassMode::Direct(_) => {
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ return local(OperandRef::from_immediate_or_packed_pair(
+ bx, llarg, arg.layout,
+ ));
+ }
+ PassMode::Pair(..) => {
+ let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
+ llarg_idx += 2;
+
+ return local(OperandRef {
+ val: OperandValue::Pair(a, b),
+ layout: arg.layout,
+ });
+ }
+ _ => {}
+ }
+ }
+
+ if arg.is_sized_indirect() {
+ // Don't copy an indirect argument to an alloca, the caller
+ // already put it in a temporary alloca and gave it up.
+ // FIXME: lifetimes
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+ } else if arg.is_unsized_indirect() {
+ // As the storage for the indirect argument lives during
+ // the whole function call, we just copy the fat pointer.
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let llextra = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let indirect_operand = OperandValue::Pair(llarg, llextra);
+
+ let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
+ indirect_operand.store(bx, tmp);
+ LocalRef::UnsizedPlace(tmp)
+ } else {
+ let tmp = PlaceRef::alloca(bx, arg.layout);
+ bx.store_fn_arg(arg, &mut llarg_idx, tmp);
+ LocalRef::Place(tmp)
+ }
+ })
+ .collect::<Vec<_>>();
+
+ if fx.instance.def.requires_caller_location(bx.tcx()) {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ // Subtract off the tupled argument that gets 'expanded'
+ args.len() - 1 + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fx.fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR",
+ fx.instance
+ );
+
+ let arg = fx.fn_abi.args.last().unwrap();
+ match arg.mode {
+ PassMode::Direct(_) => (),
+ _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
+ }
+
+ fx.caller_location = Some(OperandRef {
+ val: OperandValue::Immediate(bx.get_param(llarg_idx)),
+ layout: arg.layout,
+ });
+ }
+
+ args
+}
+
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+mod intrinsic;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
new file mode 100644
index 000000000..c612634fc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -0,0 +1,461 @@
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, Pointer, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, Size};
+
+use std::fmt;
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone, Debug)]
+pub enum OperandValue<V> {
+ /// A reference to the actual operand. The data is guaranteed
+ /// to be valid for the operand's lifetime.
+ /// The second value, if any, is the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ Ref(V, Option<V>, Align),
+ /// A single LLVM value.
+ Immediate(V),
+ /// A pair of immediate LLVM values. Used by fat pointers too.
+ Pair(V, V),
+}
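+// Illustrative mapping from Rust types to variants: an `i32` or thin pointer
+// is `Immediate`, a fat pointer like `&[u8]` is `Pair(data_ptr, len)`, and a
+// large struct passed in memory is `Ref(ptr, None, align)`.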
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder::store`
+/// directly is sure to cause problems -- use `OperandRef::store`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx, V> {
+ // The value.
+ pub val: OperandValue<V>,
+
+ // The layout of value, based on its Rust type.
+ pub layout: TyAndLayout<'tcx>,
+}
+
+impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
+ pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> OperandRef<'tcx, V> {
+ assert!(layout.is_zst());
+ OperandRef {
+ val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+ layout,
+ }
+ }
+
+ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Self {
+ let layout = bx.layout_of(ty);
+
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ let val = match val {
+ ConstValue::Scalar(x) => {
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::ZeroSized => {
+ let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::Slice { data, start, end } => {
+ let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+ bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
+ };
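+                // Illustrative: a constant `&str` such as "abc" arrives here
+                // as `Slice { data, start: 0, end: 3 }` and becomes
+                // `Pair(pointer-to-allocation, const_usize(3))`.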
+ let a = Scalar::from_pointer(
+ Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+ &bx.tcx(),
+ );
+ let a_llval = bx.scalar_to_backend(
+ a,
+ a_scalar,
+ bx.scalar_pair_element_backend_type(layout, 0, true),
+ );
+ let b_llval = bx.const_usize((end - start) as u64);
+ OperandValue::Pair(a_llval, b_llval)
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+ }
+ };
+
+ OperandRef { val, layout }
+ }
+
+ /// Asserts that this operand refers to a scalar and returns
+ /// a reference to its value.
+ pub fn immediate(self) -> V {
+ match self.val {
+ OperandValue::Immediate(s) => s,
+ _ => bug!("not immediate: {:?}", self),
+ }
+ }
+
+ pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
+ if self.layout.ty.is_box() {
+ bug!("dereferencing {:?} in codegen", self.layout.ty);
+ }
+
+ let projected_ty = self
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
+ .ty;
+
+ let (llptr, llextra) = match self.val {
+ OperandValue::Immediate(llptr) => (llptr, None),
+ OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+ OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+ };
+ let layout = cx.layout_of(projected_ty);
+ PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+ }
+
+ /// If this operand is a `Pair`, we return an aggregate with the two values.
+ /// For other cases, see `immediate`.
+ pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ) -> V {
+ if let OperandValue::Pair(a, b) = self.val {
+ let llty = bx.cx().backend_type(self.layout);
+ debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
+ // Reconstruct the immediate aggregate.
+ let mut llpair = bx.cx().const_undef(llty);
+ let imm_a = bx.from_immediate(a);
+ let imm_b = bx.from_immediate(b);
+ llpair = bx.insert_value(llpair, imm_a, 0);
+ llpair = bx.insert_value(llpair, imm_b, 1);
+ llpair
+ } else {
+ self.immediate()
+ }
+ }
+
+ /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
+ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ let val = if let Abi::ScalarPair(a, b) = layout.abi {
+ debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
+
+ // Deconstruct the immediate aggregate.
+ let a_llval = bx.extract_value(llval, 0);
+ let a_llval = bx.to_immediate_scalar(a_llval, a);
+ let b_llval = bx.extract_value(llval, 1);
+ let b_llval = bx.to_immediate_scalar(b_llval, b);
+ OperandValue::Pair(a_llval, b_llval)
+ } else {
+ OperandValue::Immediate(llval)
+ };
+ OperandRef { val, layout }
+ }
+
+ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ i: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), i);
+ let offset = self.layout.fields.offset(i);
+
+ let mut val = match (self.val, self.layout.abi) {
+ // If the field is ZST, it has no data.
+ _ if field.is_zst() => {
+ return OperandRef::new_zst(bx, field);
+ }
+
+ // Newtype of a scalar, scalar pair or vector.
+ (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
+ if field.size == self.layout.size =>
+ {
+ assert_eq!(offset.bytes(), 0);
+ self.val
+ }
+
+ // Extract a scalar component from a pair.
+ (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+ if offset.bytes() == 0 {
+ assert_eq!(field.size, a.size(bx.cx()));
+ OperandValue::Immediate(a_llval)
+ } else {
+ assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+ assert_eq!(field.size, b.size(bx.cx()));
+ OperandValue::Immediate(b_llval)
+ }
+ }
+
+ // `#[repr(simd)]` types are also immediate.
+ (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+ OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+ }
+
+ _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+ };
+
+ match (&mut val, field.abi) {
+ (OperandValue::Immediate(llval), _) => {
+                // Bools in union fields need to be truncated.
+ *llval = bx.to_immediate(*llval, field);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
+ }
+ (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+                // Bools in union fields need to be truncated.
+ *a = bx.to_immediate_scalar(*a, a_abi);
+ *b = bx.to_immediate_scalar(*b, b_abi);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
+ *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
+ }
+ (OperandValue::Pair(..), _) => bug!(),
+ (OperandValue::Ref(..), _) => bug!(),
+ }
+
+ OperandRef { val, layout: field }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+ pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::empty());
+ }
+
+ pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE);
+ }
+
+ pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+ }
+
+ pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
+ }
+
+ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ flags: MemFlags,
+ ) {
+ debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
+ // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+ // value is through `undef`, and store itself is useless.
+ if dest.layout.is_zst() {
+ return;
+ }
+ match self {
+ OperandValue::Ref(r, None, source_align) => {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+ let ty = bx.backend_type(dest.layout);
+ let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+ let val = bx.load(ty, ptr, source_align);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ return;
+ }
+ base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("cannot directly store unsized values");
+ }
+ OperandValue::Immediate(s) => {
+ let val = bx.from_immediate(s);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ }
+ OperandValue::Pair(a, b) => {
+ let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+ bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
+ };
+ let ty = bx.backend_type(dest.layout);
+ let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
+
+ let llptr = bx.struct_gep(ty, dest.llval, 0);
+ let val = bx.from_immediate(a);
+ let align = dest.align;
+ bx.store_with_flags(val, llptr, align, flags);
+
+ let llptr = bx.struct_gep(ty, dest.llval, 1);
+ let val = bx.from_immediate(b);
+ let align = dest.align.restrict_for_offset(b_offset);
+ bx.store_with_flags(val, llptr, align, flags);
+ }
+ }
+ }
+
+ pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ indirect_dest: PlaceRef<'tcx, V>,
+ ) {
+ debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
+ let flags = MemFlags::empty();
+
+ // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
+ let unsized_ty = indirect_dest
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
+ .ty;
+
+ let OperandValue::Ref(llptr, Some(llextra), _) = self else {
+ bug!("store_unsized called with a sized value")
+ };
+
+ // FIXME: choose an appropriate alignment, or use dynamic align somehow
+ let max_align = Align::from_bits(128).unwrap();
+ let min_align = Align::from_bits(8).unwrap();
+
+ // Allocate an appropriate region on the stack, and copy the value into it
+ let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+ let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+ bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+
+ // Store the allocated region and the extra to the indirect place.
+ let indirect_operand = OperandValue::Pair(lldst, llextra);
+ indirect_operand.store(bx, indirect_dest);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ fn maybe_codegen_consume_direct(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> Option<OperandRef<'tcx, Bx::Value>> {
+ debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
+
+ match self.locals[place_ref.local] {
+ LocalRef::Operand(Some(mut o)) => {
+ // Moves out of scalar and scalar pair fields are trivial.
+ for elem in place_ref.projection.iter() {
+ match elem {
+ mir::ProjectionElem::Field(ref f, _) => {
+ o = o.extract_field(bx, f.index());
+ }
+ mir::ProjectionElem::Index(_)
+ | mir::ProjectionElem::ConstantIndex { .. } => {
+ // ZSTs don't require any actual memory access.
+ // FIXME(eddyb) deduplicate this with the identical
+ // checks in `codegen_consume` and `extract_field`.
+ let elem = o.layout.field(bx.cx(), 0);
+ if elem.is_zst() {
+ o = OperandRef::new_zst(bx, elem);
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ }
+ }
+
+ Some(o)
+ }
+ LocalRef::Operand(None) => {
+ bug!("use of {:?} before def", place_ref);
+ }
+ LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
+ // watch out for locals that do not have an
+ // alloca; they are handled somewhat differently
+ None
+ }
+ }
+ }
+
+ pub fn codegen_consume(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_consume(place_ref={:?})", place_ref);
+
+ let ty = self.monomorphized_place_ty(place_ref);
+ let layout = bx.cx().layout_of(ty);
+
+ // ZSTs don't require any actual memory access.
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
+ return o;
+ }
+
+ // for most places, to consume them we just load them
+ // out from their home
+ let place = self.codegen_place(bx, place_ref);
+ bx.load_operand(place)
+ }
+
+ pub fn codegen_operand(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_operand(operand={:?})", operand);
+
+ match *operand {
+ mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
+ self.codegen_consume(bx, place.as_ref())
+ }
+
+ mir::Operand::Constant(ref constant) => {
+ // This cannot fail because we checked all required_consts in advance.
+ self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts")
+ })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
new file mode 100644
index 000000000..268c4d765
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -0,0 +1,549 @@
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
+use crate::common::IntPredicate;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+ /// A pointer to the contents of the place.
+ pub llval: V,
+
+    /// This place's extra data if it is unsized, or `None` if it is sized.
+ pub llextra: Option<V>,
+
+ /// The monomorphized type of this place, including variant information.
+ pub layout: TyAndLayout<'tcx>,
+
+ /// The alignment we know for this place.
+ pub align: Align,
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+ }
+
+ pub fn new_sized_aligned(
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ ) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align }
+ }
+
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+ let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+ Self::new_sized(tmp, layout)
+ }
+
+ /// Returns a place for an indirect reference to an unsized place.
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
+ let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+ let ptr_layout = bx.cx().layout_of(ptr_ty);
+ Self::alloca(bx, ptr_layout)
+ }
+
+ pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
+ if let FieldsShape::Array { count, .. } = self.layout.fields {
+ if self.layout.is_unsized() {
+ assert_eq!(count, 0);
+ self.llextra.unwrap()
+ } else {
+ cx.const_usize(count)
+ }
+ } else {
+ bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+ }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ /// Access a field, at a point when the value's case is known.
+ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ix: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), ix);
+ let offset = self.layout.fields.offset(ix);
+ let effective_field_align = self.align.restrict_for_offset(offset);
+
+ let mut simple = || {
+ let llval = match self.layout.abi {
+ _ if offset.bytes() == 0 => {
+ // Unions and newtypes only use an offset of 0.
+ // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+ self.llval
+ }
+ Abi::ScalarPair(a, b)
+ if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
+ {
+ // Offset matches second field.
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, 1)
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+ // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+ // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+ bug!(
+ "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+ field,
+ self.layout
+ );
+ }
+ _ => {
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
+ }
+ };
+ PlaceRef {
+ // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
+ llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+ llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+ layout: field,
+ align: effective_field_align,
+ }
+ };
+
+ // Simple cases, which don't need DST adjustment:
+ // * no metadata available - just log the case
+ // * known alignment - sized types, `[T]`, `str` or a foreign type
+ // * packed struct - there is no alignment padding
+ match field.ty.kind() {
+ _ if self.llextra.is_none() => {
+ debug!(
+                        "unsized field `{}` of `{:?}` has no metadata for adjustment",
+ ix, self.llval
+ );
+ return simple();
+ }
+ _ if !field.is_unsized() => return simple(),
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+ ty::Adt(def, _) => {
+ if def.repr().packed() {
+ // FIXME(eddyb) generalize the adjustment when we
+ // start supporting packing to larger alignments.
+ assert_eq!(self.layout.align.abi.bytes(), 1);
+ return simple();
+ }
+ }
+ _ => {}
+ }
+
+ // We need to get the pointer manually now.
+ // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+ // because the field may have an arbitrary alignment in the LLVM representation
+ // anyway.
+ //
+ // To demonstrate:
+ //
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
+ // the `y` field has 16-bit alignment.
+
+ let meta = self.llextra;
+
+ let unaligned_offset = bx.cx().const_usize(offset.bytes());
+
+ // Get the alignment of the field
+ let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment
+ let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);
+
+ debug!("struct_field_ptr: DST field offset: {:?}", offset);
+
+ // Cast and adjust pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected.
+ let ll_fty = bx.cx().backend_type(field);
+ debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+ PlaceRef {
+ llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
+ llextra: self.llextra,
+ layout: field,
+ align: effective_field_align,
+ }
+ }
+
+ /// Obtain the actual discriminant of a value.
+ #[instrument(level = "trace", skip(bx))]
+ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ cast_to: Ty<'tcx>,
+ ) -> V {
+ let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+ if self.layout.abi.is_uninhabited() {
+ return bx.cx().const_undef(cast_to);
+ }
+ let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
+ Variants::Single { index } => {
+ let discr_val = self
+ .layout
+ .ty
+ .discriminant_for_variant(bx.cx().tcx(), index)
+ .map_or(index.as_u32() as u128, |discr| discr.val);
+ return bx.cx().const_uint_big(cast_to, discr_val);
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = self.project_field(bx, tag_field);
+ let tag = bx.load_operand(tag);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.primitive() {
+ // We use `i1` for bytes that are always `0` or `1`,
+ // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+ // let LLVM interpret the `i1` as signed, because
+ // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
+ Int(_, signed) => !tag_scalar.is_bool() && signed,
+ _ => false,
+ };
+ bx.intcast(tag.immediate(), cast_to, signed)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+ let niche_llty = bx.cx().immediate_backend_type(tag.layout);
+ let tag = tag.immediate();
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
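+                // Worked example (hypothetical layout): with `niche_start == 2`,
+                // `niche_variants == 1..=2` and `dataful_variant == 0`, a tag of
+                // 2 gives `relative_discr == 0` -> variant 1, a tag of 3 gives
+                // variant 2, and any other tag fails `is_niche` and selects
+                // variant 0.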
+ let relative_discr = if niche_start == 0 {
+ // Avoid subtracting `0`, which wouldn't work for pointers.
+ // FIXME(eddyb) check the actual primitive type here.
+ tag
+ } else {
+ bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = if relative_max == 0 {
+ // Avoid calling `const_uint`, which wouldn't work for pointers.
+ // Also use canonical == 0 instead of non-canonical u<= 0.
+ // FIXME(eddyb) check the actual primitive type here.
+ bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+ } else {
+ let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
+ bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ bx.cx().const_uint(cast_to, 0)
+ } else {
+ bx.intcast(relative_discr, cast_to, false)
+ };
+ bx.add(
+ relative_discr,
+ bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+ )
+ };
+
+ bx.select(
+ is_niche,
+ niche_discr,
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ )
+ }
+ }
+ }
+
+ /// Sets the discriminant for a new value of the given case of the given
+ /// representation.
+ pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) {
+ if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ return;
+ }
+ match self.layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+ let ptr = self.project_field(bx, tag_field);
+ let to =
+ self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
+ bx.store(
+ bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
+ ptr.llval,
+ ptr.align,
+ );
+ }
+ Variants::Multiple {
+ tag_encoding:
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ tag_field,
+ ..
+ } => {
+ if variant_index != dataful_variant {
+ if bx.cx().sess().target.arch == "arm"
+ || bx.cx().sess().target.arch == "aarch64"
+ {
+ // FIXME(#34427): as workaround for LLVM bug on ARM,
+ // use memset of 0 before assigning niche value.
+ let fill_byte = bx.cx().const_u8(0);
+ let size = bx.cx().const_usize(self.layout.size.bytes());
+ bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
+ }
+
+ let niche = self.project_field(bx, tag_field);
+ let niche_llty = bx.cx().immediate_backend_type(niche.layout);
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = (niche_value as u128).wrapping_add(niche_start);
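+                    // Illustrative (hypothetical layout): with the same encoding
+                    // as in the `codegen_get_discr` example, storing variant 2
+                    // when `niche_variants` starts at 1 and `niche_start == 2`
+                    // writes `(2 - 1) + 2 == 3` into the niche field.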
+ // FIXME(eddyb): check the actual primitive type here.
+ let niche_llval = if niche_value == 0 {
+ // HACK(eddyb): using `c_null` as it works on all types.
+ bx.cx().const_null(niche_llty)
+ } else {
+ bx.cx().const_uint_big(niche_llty, niche_value)
+ };
+ OperandValue::Immediate(niche_llval).store(bx, niche);
+ }
+ }
+ }
+ }
+
+ pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ llindex: V,
+ ) -> Self {
+ // Statically compute the offset if we can, otherwise just use the element size,
+ // as this will yield the lowest alignment.
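+        // Illustrative: indexing a `[u64; N]` with a constant `llindex == 3`
+        // gives `offset == 24`, so the resulting alignment is `self.align`
+        // restricted for a 24-byte offset (at most 8 here).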
+ let layout = self.layout.field(bx, 0);
+ let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+ layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+ } else {
+ layout.size
+ };
+
+ PlaceRef {
+ llval: bx.inbounds_gep(
+ bx.cx().backend_type(self.layout),
+ self.llval,
+ &[bx.cx().const_usize(0), llindex],
+ ),
+ llextra: None,
+ layout,
+ align: self.align.restrict_for_offset(offset),
+ }
+ }
+
+ pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) -> Self {
+ let mut downcast = *self;
+ downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+
+ // Cast to the appropriate variant struct type.
+ let variant_ty = bx.cx().backend_type(downcast.layout);
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+ downcast
+ }
+
+ pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_start(self.llval, self.layout.size);
+ }
+
+ pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_end(self.llval, self.layout.size);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_place(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = self.cx;
+ let tcx = self.cx.tcx();
+
+ let mut base = 0;
+ let mut cg_base = match self.locals[place_ref.local] {
+ LocalRef::Place(place) => place,
+ LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
+ LocalRef::Operand(..) => {
+ if place_ref.has_deref() {
+ base = 1;
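+                    // The local itself is a pointer operand: consume it and start
+                    // the remaining projections from the pointee it refers to.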
+ let cg_base = self.codegen_consume(
+ bx,
+ mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
+ );
+ cg_base.deref(bx.cx())
+ } else {
+ bug!("using operand local {:?} as place", place_ref);
+ }
+ }
+ };
+ for elem in place_ref.projection[base..].iter() {
+ cg_base = match *elem {
+ mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
+ mir::ProjectionElem::Field(ref field, _) => {
+ cg_base.project_field(bx, field.index())
+ }
+ mir::ProjectionElem::Index(index) => {
+ let index = &mir::Operand::Copy(mir::Place::from(index));
+ let index = self.codegen_operand(bx, index);
+ let llindex = index.immediate();
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ cg_base.project_index(bx, lloffset)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ let lllen = cg_base.len(bx.cx());
+ let llindex = bx.sub(lllen, lloffset);
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::Subslice { from, to, from_end } => {
+ let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+ let projected_ty =
+ PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
+ subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));
+
+ if subslice.layout.is_unsized() {
+ assert!(from_end, "slice subslices should be `from_end`");
+ subslice.llextra = Some(bx.sub(
+ cg_base.llextra.unwrap(),
+ bx.cx().const_usize((from as u64) + (to as u64)),
+ ));
+ }
+
+ // Cast the place pointer type to the new
+ // array or slice type (`*[%_; new_len]`).
+ subslice.llval = bx.pointercast(
+ subslice.llval,
+ bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+ );
+
+ subslice
+ }
+ mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
+ };
+ }
+ debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
+ cg_base
+ }
+
+ pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
+ let tcx = self.cx.tcx();
+ let place_ty = place_ref.ty(self.mir, tcx);
+ self.monomorphize(place_ty.ty)
+ }
+}
+
+fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ value: Bx::Value,
+ align: Bx::Value,
+) -> Bx::Value {
+ // In pseudo code:
+ //
+ // if value & (align - 1) == 0 {
+ // value
+ // } else {
+ // (value & !(align - 1)) + align
+ // }
+ //
+ // Usually this is written without branches as
+ //
+ // (value + align - 1) & !(align - 1)
+ //
+ // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
+    // at compile time to be `1`, this expression should be optimized to `align`. However,
+    // that optimization is only valid if `align` is a power of two. Since the optimizer
+    // doesn't know that `align` is a power of two, it cannot perform this optimization.
+ //
+ // Instead we use
+ //
+ // value + (-value & (align - 1))
+ //
+ // Since `align` is used only once, the expression can be optimized. For `value = 0`
+    // it's optimized to `0` even in debug mode.
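+    //
+    // For example, with `value = 13` and `align = 8`: `-13 & (8 - 1) == 3`, so the
+    // result is `13 + 3 == 16`; for an already-aligned `value = 16`, the offset
+    // term is `0`.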
+ //
+ // NB: The previous version of this code used
+ //
+ // (value + align - 1) & -align
+ //
+ // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
+ // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
+ let one = bx.const_usize(1);
+ let align_minus_1 = bx.sub(align, one);
+ let neg_value = bx.neg(value);
+ let offset = bx.and(neg_value, align_minus_1);
+ bx.add(value, offset)
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
new file mode 100644
index 000000000..26b9fbf44
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -0,0 +1,729 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::Operand;
+use rustc_middle::ty::cast::{CastTy, IntTy};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_rvalue(
+ &mut self,
+ mut bx: Bx,
+ dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+                // FIXME: consider not copying constants through the stack. (Fixable by
+                // codegen'ing constants into `OperandValue::Ref`; if we don't already do
+                // that, why not?)
+ cg_operand.val.store(&mut bx, dest);
+ bx
+ }
+
+ mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+ // The destination necessarily contains a fat pointer, so if
+ // it's a scalar pair, it's a fat pointer or newtype thereof.
+ if bx.cx().is_backend_scalar_pair(dest.layout) {
+ // Into-coerce of a thin pointer to a fat pointer -- just
+ // use the operand path.
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ return bx;
+ }
+
+ // Unsize of a nontrivial struct. I would prefer for
+ // this to be eliminated by MIR building, but
+ // `CoerceUnsized` can be passed by a where-clause,
+ // so the (generic) MIR may not be able to expand it.
+ let operand = self.codegen_operand(&mut bx, source);
+ match operand.val {
+ OperandValue::Pair(..) | OperandValue::Immediate(_) => {
+ // Unsize from an immediate structure. We don't
+ // really need a temporary alloca here, but
+ // avoiding it would require us to have
+ // `coerce_unsized_into` use `extractvalue` to
+ // index into the struct, and this case isn't
+ // important enough for it.
+ debug!("codegen_rvalue: creating ugly alloca");
+ let scratch = PlaceRef::alloca(&mut bx, operand.layout);
+ scratch.storage_live(&mut bx);
+ operand.val.store(&mut bx, scratch);
+ base::coerce_unsized_into(&mut bx, scratch, dest);
+ scratch.storage_dead(&mut bx);
+ }
+ OperandValue::Ref(llref, None, align) => {
+ let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+ base::coerce_unsized_into(&mut bx, source, dest);
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("unsized coercion on an unsized rvalue");
+ }
+ }
+ bx
+ }
+
+ mir::Rvalue::Repeat(ref elem, count) => {
+ let cg_elem = self.codegen_operand(&mut bx, elem);
+
+ // Do not generate the loop for zero-sized elements or empty arrays.
+ if dest.layout.is_zst() {
+ return bx;
+ }
+
+ if let OperandValue::Immediate(v) = cg_elem.val {
+ let zero = bx.const_usize(0);
+ let start = dest.project_index(&mut bx, zero).llval;
+ let size = bx.const_usize(dest.layout.size.bytes());
+
+ // Use llvm.memset.p0i8.* to initialize all zero arrays
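+                    // (e.g. `[0u64; 32]` is lowered to a single 256-byte memset).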
+ if bx.cx().const_to_opt_uint(v) == Some(0) {
+ let fill = bx.cx().const_u8(0);
+ bx.memset(start, fill, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+
+ // Use llvm.memset.p0i8.* to initialize byte arrays
+ let v = bx.from_immediate(v);
+ if bx.cx().val_ty(v) == bx.cx().type_i8() {
+ bx.memset(start, v, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+ }
+
+ let count =
+ self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+
+ bx.write_operand_repeatedly(cg_elem, count, dest)
+ }
+
+ mir::Rvalue::Aggregate(ref kind, ref operands) => {
+ let (dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
+ dest.codegen_set_discr(&mut bx, variant_index);
+ if bx.tcx().adt_def(adt_did).is_enum() {
+ (dest.project_downcast(&mut bx, variant_index), active_field_index)
+ } else {
+ (dest, active_field_index)
+ }
+ }
+ _ => (dest, None),
+ };
+ for (i, operand) in operands.iter().enumerate() {
+ let op = self.codegen_operand(&mut bx, operand);
+                    // Do not generate stores and GEPs for zero-sized fields.
+ if !op.layout.is_zst() {
+ let field_index = active_field_index.unwrap_or(i);
+ let field = if let mir::AggregateKind::Array(_) = **kind {
+ let llindex = bx.cx().const_usize(field_index as u64);
+ dest.project_index(&mut bx, llindex)
+ } else {
+ dest.project_field(&mut bx, field_index)
+ };
+ op.val.store(&mut bx, field);
+ }
+ }
+ bx
+ }
+
+ _ => {
+ assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ bx
+ }
+ }
+ }
+
+ pub fn codegen_rvalue_unsized(
+ &mut self,
+ mut bx: Bx,
+ indirect_dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ debug!(
+ "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
+ indirect_dest.llval, rvalue
+ );
+
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+ cg_operand.val.store_unsized(&mut bx, indirect_dest);
+ bx
+ }
+
+ _ => bug!("unsized assignment other than `Rvalue::Use`"),
+ }
+ }
+
+ pub fn codegen_rvalue_operand(
+ &mut self,
+ mut bx: Bx,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ assert!(
+ self.rvalue_creates_operand(rvalue, DUMMY_SP),
+ "cannot codegen {:?} to operand",
+ rvalue,
+ );
+
+ match *rvalue {
+ mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+ let operand = self.codegen_operand(&mut bx, source);
+ debug!("cast operand is {:?}", operand);
+ let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
+
+ let val = match *kind {
+ mir::CastKind::PointerExposeAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let llptr = operand.immediate();
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let lladdr = bx.ptrtoint(llptr, llcast_ty);
+ OperandValue::Immediate(lladdr)
+ }
+ mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+ match *operand.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+ match *operand.layout.ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ bx.cx().tcx(),
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.cx().get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+ // This is a no-op at the LLVM level.
+ operand.val
+ }
+ mir::CastKind::Pointer(PointerCast::Unsize) => {
+ assert!(bx.cx().is_backend_scalar_pair(cast));
+ let (lldata, llextra) = match operand.val {
+ OperandValue::Pair(lldata, llextra) => {
+ // unsize from a fat pointer -- this is a
+ // "trait-object-to-supertrait" coercion.
+ (lldata, Some(llextra))
+ }
+ OperandValue::Immediate(lldata) => {
+ // "standard" unsize
+ (lldata, None)
+ }
+ OperandValue::Ref(..) => {
+ bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
+ }
+ };
+ let (lldata, llextra) =
+ base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+ OperandValue::Pair(lldata, llextra)
+ }
+ mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+ | mir::CastKind::Misc
+ if bx.cx().is_backend_scalar_pair(operand.layout) =>
+ {
+ if let OperandValue::Pair(data_ptr, meta) = operand.val {
+ if bx.cx().is_backend_scalar_pair(cast) {
+ let data_cast = bx.pointercast(
+ data_ptr,
+ bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+ );
+ OperandValue::Pair(data_cast, meta)
+ } else {
+                            // Cast to thin-ptr: a fat-ptr to thin-ptr cast extracts
+                            // the data pointer and pointer-casts it to the desired
+                            // pointer type.
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let llval = bx.pointercast(data_ptr, llcast_ty);
+ OperandValue::Immediate(llval)
+ }
+ } else {
+ bug!("unexpected non-pair operand");
+ }
+ }
+ mir::CastKind::Pointer(
+ PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+ )
+ | mir::CastKind::Misc
+ // Since int2ptr can have arbitrary integer types as input (so we have to do
+ // sign extension and all that), it is currently best handled in the same code
+ // path as the other integer-to-X casts.
+ | mir::CastKind::PointerFromExposedAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let ll_t_out = bx.cx().immediate_backend_type(cast);
+ if operand.layout.abi.is_uninhabited() {
+ let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+ return (bx, OperandRef { val, layout: cast });
+ }
+ let r_t_in =
+ CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
+ let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
+ let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
+ let llval = operand.immediate();
+
+ let newval = match (r_t_in, r_t_out) {
+ (CastTy::Int(i), CastTy::Int(_)) => {
+ bx.intcast(llval, ll_t_out, i.is_signed())
+ }
+ (CastTy::Float, CastTy::Float) => {
+ let srcsz = bx.cx().float_width(ll_t_in);
+ let dstsz = bx.cx().float_width(ll_t_out);
+ if dstsz > srcsz {
+ bx.fpext(llval, ll_t_out)
+ } else if srcsz > dstsz {
+ bx.fptrunc(llval, ll_t_out)
+ } else {
+ llval
+ }
+ }
+ (CastTy::Int(i), CastTy::Float) => {
+ if i.is_signed() {
+ bx.sitofp(llval, ll_t_out)
+ } else {
+ bx.uitofp(llval, ll_t_out)
+ }
+ }
+ (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
+ bx.pointercast(llval, ll_t_out)
+ }
+ (CastTy::Int(i), CastTy::Ptr(_)) => {
+ let usize_llval =
+ bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
+ bx.inttoptr(usize_llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(IntTy::I)) => {
+ bx.cast_float_to_int(true, llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(_)) => {
+ bx.cast_float_to_int(false, llval, ll_t_out)
+ }
+ _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+ };
+ OperandValue::Immediate(newval)
+ }
+ };
+ (bx, OperandRef { val, layout: cast })
+ }
+
+ mir::Rvalue::Ref(_, bk, place) => {
+ let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
+ )
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ref)
+ }
+
+ mir::Rvalue::CopyForDeref(place) => {
+ let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
+ (bx, operand)
+ }
+ mir::Rvalue::AddressOf(mutability, place) => {
+ let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ptr)
+ }
+
+ mir::Rvalue::Len(place) => {
+ let size = self.evaluate_array_len(&mut bx, place);
+ let operand = OperandRef {
+ val: OperandValue::Immediate(size),
+ layout: bx.cx().layout_of(bx.tcx().types.usize),
+ };
+ (bx, operand)
+ }
+
+ mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let llresult = match (lhs.val, rhs.val) {
+ (
+ OperandValue::Pair(lhs_addr, lhs_extra),
+ OperandValue::Pair(rhs_addr, rhs_extra),
+ ) => self.codegen_fat_ptr_binop(
+ &mut bx,
+ op,
+ lhs_addr,
+ lhs_extra,
+ rhs_addr,
+ rhs_extra,
+ lhs.layout.ty,
+ ),
+
+ (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
+ self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+ }
+
+ _ => bug!(),
+ };
+ let operand = OperandRef {
+ val: OperandValue::Immediate(llresult),
+ layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+ };
+ (bx, operand)
+ }
+ mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let result = self.codegen_scalar_checked_binop(
+ &mut bx,
+ op,
+ lhs.immediate(),
+ rhs.immediate(),
+ lhs.layout.ty,
+ );
+ let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+ let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+ let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
+
+ (bx, operand)
+ }
+
+ mir::Rvalue::UnaryOp(op, ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+ let is_float = operand.layout.ty.is_floating_point();
+ let llval = match op {
+ mir::UnOp::Not => bx.not(lloperand),
+ mir::UnOp::Neg => {
+ if is_float {
+ bx.fneg(lloperand)
+ } else {
+ bx.neg(lloperand)
+ }
+ }
+ };
+ (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+ }
+
+ mir::Rvalue::Discriminant(ref place) => {
+ let discr_ty = rvalue.ty(self.mir, bx.tcx());
+ let discr_ty = self.monomorphize(discr_ty);
+ let discr = self
+ .codegen_place(&mut bx, place.as_ref())
+ .codegen_get_discr(&mut bx, discr_ty);
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(discr),
+ layout: self.cx.layout_of(discr_ty),
+ },
+ )
+ }
+
+ mir::Rvalue::NullaryOp(null_op, ty) => {
+ let ty = self.monomorphize(ty);
+ assert!(bx.cx().type_is_sized(ty));
+ let layout = bx.cx().layout_of(ty);
+ let val = match null_op {
+ mir::NullOp::SizeOf => layout.size.bytes(),
+ mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = bx.cx().const_usize(val);
+ let tcx = self.cx.tcx();
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(val),
+ layout: self.cx.layout_of(tcx.types.usize),
+ },
+ )
+ }
+
+ mir::Rvalue::ThreadLocalRef(def_id) => {
+ assert!(bx.cx().tcx().is_static(def_id));
+ let static_ = bx.get_static(def_id);
+ let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+ let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
+ (bx, operand)
+ }
+ mir::Rvalue::Use(ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ (bx, operand)
+ }
+ mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
+ // According to `rvalue_creates_operand`, only ZST
+ // aggregate rvalues are allowed to be operands.
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let operand =
+ OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
+ (bx, operand)
+ }
+ mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+
+ let content_ty = self.monomorphize(content_ty);
+ let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+ let llty_ptr = bx.cx().backend_type(box_layout);
+
+ let val = bx.pointercast(lloperand, llty_ptr);
+ let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
+ (bx, operand)
+ }
+ }
+ }
+
+ fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
+        // ZSTs are passed as operands and require special handling
+        // because `codegen_place()` panics if a `Local` is an operand.
+ if let Some(index) = place.as_local() {
+ if let LocalRef::Operand(Some(op)) = self.locals[index] {
+ if let ty::Array(_, n) = op.layout.ty.kind() {
+ let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+ return bx.cx().const_usize(n);
+ }
+ }
+ }
+        // Use the common size calculation for non-zero-sized types.
+ let cg_value = self.codegen_place(bx, place.as_ref());
+ cg_value.len(bx.cx())
+ }
+
+ /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
+ fn codegen_place_to_pointer(
+ &mut self,
+ mut bx: Bx,
+ place: mir::Place<'tcx>,
+ mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ let cg_place = self.codegen_place(&mut bx, place.as_ref());
+
+ let ty = cg_place.layout.ty;
+
+ // Note: places are indirect, so storing the `llval` into the
+ // destination effectively creates a reference.
+ let val = if !bx.cx().type_has_metadata(ty) {
+ OperandValue::Immediate(cg_place.llval)
+ } else {
+ OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+ };
+ (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+ }
+
+ pub fn codegen_scalar_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ let is_float = input_ty.is_floating_point();
+ let is_signed = input_ty.is_signed();
+ match op {
+ mir::BinOp::Add => {
+ if is_float {
+ bx.fadd(lhs, rhs)
+ } else {
+ bx.add(lhs, rhs)
+ }
+ }
+ mir::BinOp::Sub => {
+ if is_float {
+ bx.fsub(lhs, rhs)
+ } else {
+ bx.sub(lhs, rhs)
+ }
+ }
+ mir::BinOp::Mul => {
+ if is_float {
+ bx.fmul(lhs, rhs)
+ } else {
+ bx.mul(lhs, rhs)
+ }
+ }
+ mir::BinOp::Div => {
+ if is_float {
+ bx.fdiv(lhs, rhs)
+ } else if is_signed {
+ bx.sdiv(lhs, rhs)
+ } else {
+ bx.udiv(lhs, rhs)
+ }
+ }
+ mir::BinOp::Rem => {
+ if is_float {
+ bx.frem(lhs, rhs)
+ } else if is_signed {
+ bx.srem(lhs, rhs)
+ } else {
+ bx.urem(lhs, rhs)
+ }
+ }
+ mir::BinOp::BitOr => bx.or(lhs, rhs),
+ mir::BinOp::BitAnd => bx.and(lhs, rhs),
+ mir::BinOp::BitXor => bx.xor(lhs, rhs),
+ mir::BinOp::Offset => {
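+                // `BinOp::Offset` corresponds to `<*T>::offset`: the GEP scales the
+                // integer operand by the size of the pointee type.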
+ let pointee_type = input_ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
+ .ty;
+ let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
+ bx.inbounds_gep(llty, lhs, &[rhs])
+ }
+ mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+ mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+ mir::BinOp::Ne
+ | mir::BinOp::Lt
+ | mir::BinOp::Gt
+ | mir::BinOp::Eq
+ | mir::BinOp::Le
+ | mir::BinOp::Ge => {
+ if is_float {
+ bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
+ } else {
+ bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
+ }
+ }
+ }
+ }
+
+ pub fn codegen_fat_ptr_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs_addr: Bx::Value,
+ lhs_extra: Bx::Value,
+ rhs_addr: Bx::Value,
+ rhs_extra: Bx::Value,
+ _input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ match op {
+ mir::BinOp::Eq => {
+ let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+ bx.and(lhs, rhs)
+ }
+ mir::BinOp::Ne => {
+ let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+ bx.or(lhs, rhs)
+ }
+ mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
+                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
+ let (op, strict_op) = match op {
+ mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+ mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+ mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+ mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
+ _ => bug!(),
+ };
+ let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
+ let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
+ let rhs = bx.and(and_lhs, and_rhs);
+ bx.or(lhs, rhs)
+ }
+ _ => {
+ bug!("unexpected fat ptr binop");
+ }
+ }
+ }
+
+ pub fn codegen_scalar_checked_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> OperandValue<Bx::Value> {
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ if !bx.cx().check_overflow() {
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+ return OperandValue::Pair(val, bx.cx().const_bool(false));
+ }
+
+ let (val, of) = match op {
+ // These are checked using intrinsics
+ mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
+ let oop = match op {
+ mir::BinOp::Add => OverflowOp::Add,
+ mir::BinOp::Sub => OverflowOp::Sub,
+ mir::BinOp::Mul => OverflowOp::Mul,
+ _ => unreachable!(),
+ };
+ bx.checked_binop(oop, input_ty, lhs, rhs)
+ }
+ mir::BinOp::Shl | mir::BinOp::Shr => {
+ let lhs_llty = bx.cx().val_ty(lhs);
+ let rhs_llty = bx.cx().val_ty(rhs);
+ let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
+ let outer_bits = bx.and(rhs, invert_mask);
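+                // A set bit outside the mask means the shift amount is at least the
+                // bit width of `lhs`, i.e. the shift would overflow.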
+
+ let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+
+ (val, of)
+ }
+ _ => bug!("Operator `{:?}` is not a checkable operator", op),
+ };
+
+ OperandValue::Pair(val, of)
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+ match *rvalue {
+ mir::Rvalue::Ref(..) |
+ mir::Rvalue::CopyForDeref(..) |
+ mir::Rvalue::AddressOf(..) |
+ mir::Rvalue::Len(..) |
+ mir::Rvalue::Cast(..) | // (*)
+ mir::Rvalue::ShallowInitBox(..) | // (*)
+ mir::Rvalue::BinaryOp(..) |
+ mir::Rvalue::CheckedBinaryOp(..) |
+ mir::Rvalue::UnaryOp(..) |
+ mir::Rvalue::Discriminant(..) |
+ mir::Rvalue::NullaryOp(..) |
+ mir::Rvalue::ThreadLocalRef(_) |
+ mir::Rvalue::Use(..) => // (*)
+ true,
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) => {
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let ty = self.monomorphize(ty);
+ self.cx.spanned_layout_of(ty, span).is_zst()
+ }
+ }
+
+ // (*) this is only true if the type is suitable
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
new file mode 100644
index 000000000..f452f2988
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -0,0 +1,102 @@
+use rustc_middle::mir;
+
+use super::FunctionCx;
+use super::LocalRef;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "debug", skip(self, bx))]
+ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
+ self.set_debug_loc(&mut bx, statement.source_info);
+ match statement.kind {
+ mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
+ if let Some(index) = place.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
+ LocalRef::UnsizedPlace(cg_indirect_dest) => {
+ self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+ }
+ LocalRef::Operand(None) => {
+ let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+ self.locals[index] = LocalRef::Operand(Some(operand));
+ self.debug_introduce_local(&mut bx, index);
+ bx
+ }
+ LocalRef::Operand(Some(op)) => {
+ if !op.layout.is_zst() {
+ span_bug!(
+ statement.source_info.span,
+ "operand {:?} already assigned",
+ rvalue
+ );
+ }
+
+ // If the type is zero-sized, it's already been set here,
+ // but we still need to make sure we codegen the operand
+ self.codegen_rvalue_operand(bx, rvalue).0
+ }
+ }
+ } else {
+ let cg_dest = self.codegen_place(&mut bx, place.as_ref());
+ self.codegen_rvalue(bx, cg_dest, rvalue)
+ }
+ }
+ mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
+ self.codegen_place(&mut bx, place.as_ref())
+ .codegen_set_discr(&mut bx, variant_index);
+ bx
+ }
+ mir::StatementKind::Deinit(..) => {
+ // For now, don't codegen this to anything. In the future it may be worth
+ // experimenting with what kind of information we can emit to LLVM without hurting
+ // perf here
+ bx
+ }
+ mir::StatementKind::StorageLive(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_live(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_live(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::StorageDead(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_dead(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_dead(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::Coverage(box ref coverage) => {
+ self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
+ bx
+ }
+ mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let dst_val = self.codegen_operand(&mut bx, dst);
+ let src_val = self.codegen_operand(&mut bx, src);
+ let count = self.codegen_operand(&mut bx, count).immediate();
+ let pointee_layout = dst_val
+ .layout
+ .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
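+                // `copy_nonoverlapping` counts elements, while `memcpy` takes a byte
+                // count, so scale `count` by the pointee size.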
+ let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
+
+ let align = pointee_layout.align;
+ let dst = dst_val.immediate();
+ let src = src_val.immediate();
+ bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
+ bx
+ }
+ mir::StatementKind::FakeRead(..)
+ | mir::StatementKind::Retag { .. }
+ | mir::StatementKind::AscribeUserType(..)
+ | mir::StatementKind::Nop => bx,
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
new file mode 100644
index 000000000..5006a2157
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -0,0 +1,147 @@
+use crate::base;
+use crate::common;
+use crate::traits::*;
+use rustc_hir as hir;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Instance;
+
+pub trait MonoItemExt<'a, 'tcx> {
+ fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx);
+ fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ cx: &'a Bx::CodegenCx,
+ linkage: Linkage,
+ visibility: Visibility,
+ );
+ fn to_raw_string(&self) -> String;
+}
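+
+// Items are "predefined" first (declaring each symbol with its linkage and
+// visibility) and only then "defined" (emitting the body), so that items within
+// a codegen unit can reference one another regardless of generation order.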
+
+impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
+ fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
+ debug!(
+ "BEGIN IMPLEMENTING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+
+ match *self {
+ MonoItem::Static(def_id) => {
+ cx.codegen_static(def_id, cx.tcx().is_mutable_static(def_id));
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx().hir().item(item_id);
+ if let hir::ItemKind::GlobalAsm(ref asm) = item.kind {
+ let operands: Vec<_> = asm
+ .operands
+ .iter()
+ .map(|(op, op_sp)| match *op {
+ hir::InlineAsmOperand::Const { ref anon_const } => {
+ let anon_const_def_id =
+ cx.tcx().hir().local_def_id(anon_const.hir_id).to_def_id();
+ let const_value =
+ cx.tcx().const_eval_poly(anon_const_def_id).unwrap_or_else(
+ |_| span_bug!(*op_sp, "asm const cannot be resolved"),
+ );
+ let ty = cx
+ .tcx()
+ .typeck_body(anon_const.body)
+ .node_type(anon_const.hir_id);
+ let string = common::asm_const_to_str(
+ cx.tcx(),
+ *op_sp,
+ const_value,
+ cx.layout_of(ty),
+ );
+ GlobalAsmOperandRef::Const { string }
+ }
+ hir::InlineAsmOperand::SymFn { ref anon_const } => {
+ let ty = cx
+ .tcx()
+ .typeck_body(anon_const.body)
+ .node_type(anon_const.hir_id);
+ let instance = match ty.kind() {
+ &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ _ => span_bug!(*op_sp, "asm sym is not a function"),
+ };
+
+ GlobalAsmOperandRef::SymFn { instance }
+ }
+ hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
+ GlobalAsmOperandRef::SymStatic { def_id }
+ }
+ hir::InlineAsmOperand::In { .. }
+ | hir::InlineAsmOperand::Out { .. }
+ | hir::InlineAsmOperand::InOut { .. }
+ | hir::InlineAsmOperand::SplitInOut { .. } => {
+ span_bug!(*op_sp, "invalid operand type for global_asm!")
+ }
+ })
+ .collect();
+
+ cx.codegen_global_asm(asm.template, &operands, asm.options, asm.line_spans);
+ } else {
+ span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+ }
+ }
+ MonoItem::Fn(instance) => {
+ base::codegen_instance::<Bx>(&cx, instance);
+ }
+ }
+
+ debug!(
+ "END IMPLEMENTING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+ }
+
+ fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ cx: &'a Bx::CodegenCx,
+ linkage: Linkage,
+ visibility: Visibility,
+ ) {
+ debug!(
+ "BEGIN PREDEFINING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+
+ let symbol_name = self.symbol_name(cx.tcx()).name;
+
+ debug!("symbol {}", &symbol_name);
+
+ match *self {
+ MonoItem::Static(def_id) => {
+ cx.predefine_static(def_id, linkage, visibility, &symbol_name);
+ }
+ MonoItem::Fn(instance) => {
+ cx.predefine_fn(instance, linkage, visibility, &symbol_name);
+ }
+ MonoItem::GlobalAsm(..) => {}
+ }
+
+ debug!(
+ "END PREDEFINING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+ }
+
+ fn to_raw_string(&self) -> String {
+ match *self {
+ MonoItem::Fn(instance) => {
+ format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr().addr())
+ }
+ MonoItem::Static(id) => format!("Static({:?})", id),
+ MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
new file mode 100644
index 000000000..ecad05185
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -0,0 +1,308 @@
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::ty::query::Providers;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::symbol::Symbol;
+
+/// Features that control behaviour of rustc, rather than the codegen.
+pub const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
+
+// When adding features to the lists below, check whether they're already named
+// elsewhere in Rust (e.g. in stdarch) and whether the given name matches LLVM's.
+// If it doesn't, `to_llvm_feature` in `llvm_util` in `rustc_codegen_llvm` needs
+// to be adapted.
+
+const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("aclass", Some(sym::arm_target_feature)),
+ ("mclass", Some(sym::arm_target_feature)),
+ ("rclass", Some(sym::arm_target_feature)),
+ ("dsp", Some(sym::arm_target_feature)),
+ ("neon", Some(sym::arm_target_feature)),
+ ("crc", Some(sym::arm_target_feature)),
+ ("crypto", Some(sym::arm_target_feature)),
+ ("aes", Some(sym::arm_target_feature)),
+ ("sha2", Some(sym::arm_target_feature)),
+ ("i8mm", Some(sym::arm_target_feature)),
+ ("dotprod", Some(sym::arm_target_feature)),
+ ("v5te", Some(sym::arm_target_feature)),
+ ("v6", Some(sym::arm_target_feature)),
+ ("v6k", Some(sym::arm_target_feature)),
+ ("v6t2", Some(sym::arm_target_feature)),
+ ("v7", Some(sym::arm_target_feature)),
+ ("v8", Some(sym::arm_target_feature)),
+ ("vfp2", Some(sym::arm_target_feature)),
+ ("vfp3", Some(sym::arm_target_feature)),
+ ("vfp4", Some(sym::arm_target_feature)),
+ ("fp-armv8", Some(sym::arm_target_feature)),
+ // This is needed for inline assembly, but shouldn't be stabilized as-is
+ // since it should be enabled per-function using #[instruction_set], not
+ // #[target_feature].
+ ("thumb-mode", Some(sym::arm_target_feature)),
+ ("thumb2", Some(sym::arm_target_feature)),
+ ("d32", Some(sym::arm_target_feature)),
+];
+
+const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // FEAT_AdvSimd & FEAT_FP
+ ("neon", None),
+ // FEAT_FP16
+ ("fp16", None),
+ // FEAT_SVE
+ ("sve", None),
+ // FEAT_CRC
+ ("crc", None),
+ // FEAT_RAS
+ ("ras", None),
+ // FEAT_LSE
+ ("lse", None),
+ // FEAT_RDM
+ ("rdm", None),
+ // FEAT_RCPC
+ ("rcpc", None),
+ // FEAT_RCPC2
+ ("rcpc2", None),
+ // FEAT_DotProd
+ ("dotprod", None),
+ // FEAT_TME
+ ("tme", None),
+ // FEAT_FHM
+ ("fhm", None),
+ // FEAT_DIT
+ ("dit", None),
+ // FEAT_FLAGM
+ ("flagm", None),
+ // FEAT_SSBS
+ ("ssbs", None),
+ // FEAT_SB
+ ("sb", None),
+ // FEAT_PAUTH (address authentication)
+ ("paca", None),
+ // FEAT_PAUTH (generic authentication)
+ ("pacg", None),
+ // FEAT_DPB
+ ("dpb", None),
+ // FEAT_DPB2
+ ("dpb2", None),
+ // FEAT_SVE2
+ ("sve2", None),
+ // FEAT_SVE2_AES
+ ("sve2-aes", None),
+ // FEAT_SVE2_SM4
+ ("sve2-sm4", None),
+ // FEAT_SVE2_SHA3
+ ("sve2-sha3", None),
+ // FEAT_SVE2_BitPerm
+ ("sve2-bitperm", None),
+ // FEAT_FRINTTS
+ ("frintts", None),
+ // FEAT_I8MM
+ ("i8mm", None),
+ // FEAT_F32MM
+ ("f32mm", None),
+ // FEAT_F64MM
+ ("f64mm", None),
+ // FEAT_BF16
+ ("bf16", None),
+ // FEAT_RAND
+ ("rand", None),
+ // FEAT_BTI
+ ("bti", None),
+ // FEAT_MTE
+ ("mte", None),
+ // FEAT_JSCVT
+ ("jsconv", None),
+ // FEAT_FCMA
+ ("fcma", None),
+ // FEAT_AES
+ ("aes", None),
+ // FEAT_SHA1 & FEAT_SHA256
+ ("sha2", None),
+ // FEAT_SHA512 & FEAT_SHA3
+ ("sha3", None),
+ // FEAT_SM3 & FEAT_SM4
+ ("sm4", None),
+ // FEAT_PAN
+ ("pan", None),
+ // FEAT_LOR
+ ("lor", None),
+ // FEAT_VHE
+ ("vh", None),
+ // FEAT_PMUv3
+ ("pmuv3", None),
+ // FEAT_SPE
+ ("spe", None),
+ ("v8.1a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.2a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.3a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.4a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.5a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.6a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.7a", Some(sym::aarch64_ver_target_feature)),
+];
+
+const AARCH64_TIED_FEATURES: &[&[&str]] = &[
+ &["paca", "pacg"], // Together these represent `pauth` in LLVM
+];
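+
+// Tied features must all be enabled or disabled together: enabling `paca`
+// without `pacg` (or vice versa) is rejected.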
+
+const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("adx", None),
+ ("aes", None),
+ ("avx", None),
+ ("avx2", None),
+ ("avx512bf16", Some(sym::avx512_target_feature)),
+ ("avx512bitalg", Some(sym::avx512_target_feature)),
+ ("avx512bw", Some(sym::avx512_target_feature)),
+ ("avx512cd", Some(sym::avx512_target_feature)),
+ ("avx512dq", Some(sym::avx512_target_feature)),
+ ("avx512er", Some(sym::avx512_target_feature)),
+ ("avx512f", Some(sym::avx512_target_feature)),
+ ("avx512gfni", Some(sym::avx512_target_feature)),
+ ("avx512ifma", Some(sym::avx512_target_feature)),
+ ("avx512pf", Some(sym::avx512_target_feature)),
+ ("avx512vaes", Some(sym::avx512_target_feature)),
+ ("avx512vbmi", Some(sym::avx512_target_feature)),
+ ("avx512vbmi2", Some(sym::avx512_target_feature)),
+ ("avx512vl", Some(sym::avx512_target_feature)),
+ ("avx512vnni", Some(sym::avx512_target_feature)),
+ ("avx512vp2intersect", Some(sym::avx512_target_feature)),
+ ("avx512vpclmulqdq", Some(sym::avx512_target_feature)),
+ ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
+ ("bmi1", None),
+ ("bmi2", None),
+ ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
+ ("ermsb", Some(sym::ermsb_target_feature)),
+ ("f16c", Some(sym::f16c_target_feature)),
+ ("fma", None),
+ ("fxsr", None),
+ ("lzcnt", None),
+ ("movbe", Some(sym::movbe_target_feature)),
+ ("pclmulqdq", None),
+ ("popcnt", None),
+ ("rdrand", None),
+ ("rdseed", None),
+ ("rtm", Some(sym::rtm_target_feature)),
+ ("sha", None),
+ ("sse", None),
+ ("sse2", None),
+ ("sse3", None),
+ ("sse4.1", None),
+ ("sse4.2", None),
+ ("sse4a", Some(sym::sse4a_target_feature)),
+ ("ssse3", None),
+ ("tbm", Some(sym::tbm_target_feature)),
+ ("xsave", None),
+ ("xsavec", None),
+ ("xsaveopt", None),
+ ("xsaves", None),
+];
+
+const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("hvx", Some(sym::hexagon_target_feature)),
+ ("hvx-length128b", Some(sym::hexagon_target_feature)),
+];
+
+const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("altivec", Some(sym::powerpc_target_feature)),
+ ("power8-altivec", Some(sym::powerpc_target_feature)),
+ ("power9-altivec", Some(sym::powerpc_target_feature)),
+ ("power8-vector", Some(sym::powerpc_target_feature)),
+ ("power9-vector", Some(sym::powerpc_target_feature)),
+ ("vsx", Some(sym::powerpc_target_feature)),
+];
+
+const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
+ &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
+
+const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("m", Some(sym::riscv_target_feature)),
+ ("a", Some(sym::riscv_target_feature)),
+ ("c", Some(sym::riscv_target_feature)),
+ ("f", Some(sym::riscv_target_feature)),
+ ("d", Some(sym::riscv_target_feature)),
+ ("e", Some(sym::riscv_target_feature)),
+ ("v", Some(sym::riscv_target_feature)),
+ ("zfinx", Some(sym::riscv_target_feature)),
+ ("zdinx", Some(sym::riscv_target_feature)),
+ ("zhinx", Some(sym::riscv_target_feature)),
+ ("zhinxmin", Some(sym::riscv_target_feature)),
+ ("zfh", Some(sym::riscv_target_feature)),
+ ("zfhmin", Some(sym::riscv_target_feature)),
+ ("zbkb", Some(sym::riscv_target_feature)),
+ ("zbkc", Some(sym::riscv_target_feature)),
+ ("zbkx", Some(sym::riscv_target_feature)),
+ ("zknd", Some(sym::riscv_target_feature)),
+ ("zkne", Some(sym::riscv_target_feature)),
+ ("zknh", Some(sym::riscv_target_feature)),
+ ("zksed", Some(sym::riscv_target_feature)),
+ ("zksh", Some(sym::riscv_target_feature)),
+ ("zkr", Some(sym::riscv_target_feature)),
+ ("zkn", Some(sym::riscv_target_feature)),
+ ("zks", Some(sym::riscv_target_feature)),
+ ("zk", Some(sym::riscv_target_feature)),
+ ("zkt", Some(sym::riscv_target_feature)),
+];
+
+const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("simd128", None),
+ ("atomics", Some(sym::wasm_target_feature)),
+ ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
+ ("bulk-memory", Some(sym::wasm_target_feature)),
+ ("mutable-globals", Some(sym::wasm_target_feature)),
+ ("reference-types", Some(sym::wasm_target_feature)),
+ ("sign-ext", Some(sym::wasm_target_feature)),
+];
+
+const BPF_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[("alu32", Some(sym::bpf_target_feature))];
+
+/// When rustdoc is running, provide a list of all known features so that all their respective
+/// primitives may be documented.
+///
+/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
+pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
+ std::iter::empty()
+ .chain(ARM_ALLOWED_FEATURES.iter())
+ .chain(AARCH64_ALLOWED_FEATURES.iter())
+ .chain(X86_ALLOWED_FEATURES.iter())
+ .chain(HEXAGON_ALLOWED_FEATURES.iter())
+ .chain(POWERPC_ALLOWED_FEATURES.iter())
+ .chain(MIPS_ALLOWED_FEATURES.iter())
+ .chain(RISCV_ALLOWED_FEATURES.iter())
+ .chain(WASM_ALLOWED_FEATURES.iter())
+ .chain(BPF_ALLOWED_FEATURES.iter())
+ .cloned()
+}
+
+pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
+ match &*sess.target.arch {
+ "arm" => ARM_ALLOWED_FEATURES,
+ "aarch64" => AARCH64_ALLOWED_FEATURES,
+ "x86" | "x86_64" => X86_ALLOWED_FEATURES,
+ "hexagon" => HEXAGON_ALLOWED_FEATURES,
+ "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
+ "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
+ "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
+ "wasm32" | "wasm64" => WASM_ALLOWED_FEATURES,
+ "bpf" => BPF_ALLOWED_FEATURES,
+ _ => &[],
+ }
+}
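+
+// These tables back the `#[target_feature]` attribute; for example, on x86 one
+// may write
+//
+//     #[target_feature(enable = "avx2")]
+//     unsafe fn use_avx2() { /* ... */ }
+//
+// and `"avx2"` is accepted because it appears in `X86_ALLOWED_FEATURES` without
+// a feature gate (`None`).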
+
+pub fn tied_target_features(sess: &Session) -> &'static [&'static [&'static str]] {
+ match &*sess.target.arch {
+ "aarch64" => AARCH64_TIED_FEATURES,
+ _ => &[],
+ }
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ providers.supported_target_features = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ if tcx.sess.opts.actually_rustdoc {
+ // rustdoc needs to be able to document functions that use all the features, so
+ // whitelist them all
+ all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
+ } else {
+ supported_target_features(tcx.sess).iter().map(|&(a, b)| (a.to_string(), b)).collect()
+ }
+ };
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
new file mode 100644
index 000000000..a00d78daf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -0,0 +1,8 @@
+use super::BackendTypes;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::FnAbi;
+
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
+ fn get_param(&mut self, index: usize) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs
new file mode 100644
index 000000000..c2ae74b18
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs
@@ -0,0 +1,66 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::Instance;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+#[derive(Debug)]
+pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ value: OperandRef<'tcx, B::Value>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_value: OperandRef<'tcx, B::Value>,
+ out_place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ Const {
+ string: String,
+ },
+ SymFn {
+ instance: Instance<'tcx>,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+#[derive(Debug)]
+pub enum GlobalAsmOperandRef<'tcx> {
+ Const { string: String },
+ SymFn { instance: Instance<'tcx> },
+ SymStatic { def_id: DefId },
+}
+
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+    /// Take an inline assembly expression and codegen it via the backend.
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ instance: Instance<'_>,
+ dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+ );
+}
+
+pub trait AsmMethods<'tcx> {
+ fn codegen_global_asm(
+ &self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[GlobalAsmOperandRef<'tcx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
new file mode 100644
index 000000000..779bd3ea2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -0,0 +1,161 @@
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use crate::back::write::TargetMachineFactoryFn;
+use crate::{CodegenResults, ModuleCodegen};
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_session::{
+ config::{self, OutputFilenames, PrintRequest},
+ cstore::MetadataLoaderDyn,
+ Session,
+};
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::spec::Target;
+
+pub use rustc_data_structures::sync::MetadataRef;
+
+use std::any::Any;
+
+pub trait BackendTypes {
+ type Value: CodegenObject;
+ type Function: CodegenObject;
+
+ type BasicBlock: Copy;
+ type Type: CodegenObject;
+ type Funclet;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `Dbg`, `Debug`, `DebugInfo`, `DI` etc.).
+ type DIScope: Copy;
+ type DILocation: Copy;
+ type DIVariable: Copy;
+}
+
+pub trait Backend<'tcx>:
+ Sized
+ + BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+ Self: BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+pub trait CodegenBackend {
+ fn init(&self, _sess: &Session) {}
+ fn print(&self, _req: PrintRequest, _sess: &Session) {}
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
+ vec![]
+ }
+ fn print_passes(&self) {}
+ fn print_version(&self) {}
+
+    /// If this backend provides additional builtin targets, return the one selected by the options here.
+ /// Be careful: this is called *before* init() is called.
+ fn target_override(&self, _opts: &config::Options) -> Option<Target> {
+ None
+ }
+
+ /// The metadata loader used to load rlib and dylib metadata.
+ ///
+ /// Alternative codegen backends may want to use different rlib or dylib formats than the
+ /// default native static archives and dynamic libraries.
+ fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
+ Box::new(crate::back::metadata::DefaultMetadataLoader)
+ }
+
+ fn provide(&self, _providers: &mut Providers) {}
+ fn provide_extern(&self, _providers: &mut ExternProviders) {}
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any>;
+
+    /// This is called on the `Box<dyn Any>` returned by `codegen_crate`.
+ ///
+ /// # Panics
+ ///
+    /// Panics when the passed `Box<dyn Any>` was not returned by `codegen_crate`.
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
+
+    /// This is called with the `CodegenResults` returned by `join_codegen`
+    /// to produce the final linked artifacts.
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed>;
+}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send + Sync {
+ fn codegen_allocator<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ ) -> Self::Module;
+ /// This generates the codegen unit and returns it along with
+ /// a `u64` giving an estimate of the unit's processing cost.
+ fn compile_codegen_unit(
+ &self,
+ tcx: TyCtxt<'_>,
+ cgu_name: Symbol,
+ ) -> (ModuleCodegen<Self::Module>, u64);
+ fn target_machine_factory(
+ &self,
+ sess: &Session,
+ opt_level: config::OptLevel,
+ target_features: &[String],
+ ) -> TargetMachineFactoryFn<Self>;
+ fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
+
+ fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::spawn(f)
+ }
+
+ fn spawn_named_thread<F, T>(
+ _time_trace: bool,
+ name: String,
+ f: F,
+ ) -> std::io::Result<std::thread::JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::Builder::new().name(name).spawn(f)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
new file mode 100644
index 000000000..9f49749bb
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -0,0 +1,481 @@
+use super::abi::AbiBuilderMethods;
+use super::asm::AsmBuilderMethods;
+use super::consts::ConstMethods;
+use super::coverageinfo::CoverageInfoBuilderMethods;
+use super::debuginfo::DebugInfoBuilderMethods;
+use super::intrinsic::IntrinsicCallMethods;
+use super::misc::MiscMethods;
+use super::type_::{ArgAbiMethods, BaseTypeMethods};
+use super::{HasCodegen, StaticBuilderMethods};
+
+use crate::common::{
+ AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use crate::MemFlags;
+
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
+use rustc_target::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+pub enum OverflowOp {
+ Add,
+ Sub,
+ Mul,
+}
+
+pub trait BuilderMethods<'a, 'tcx>:
+ HasCodegen<'tcx>
+ + CoverageInfoBuilderMethods<'tcx>
+ + DebugInfoBuilderMethods
+ + ArgAbiMethods<'tcx>
+ + AbiBuilderMethods<'tcx>
+ + IntrinsicCallMethods<'tcx>
+ + AsmBuilderMethods<'tcx>
+ + StaticBuilderMethods
+ + HasParamEnv<'tcx>
+ + HasTargetSpec
+{
+ fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;
+
+ fn cx(&self) -> &Self::CodegenCx;
+ fn llbb(&self) -> Self::BasicBlock;
+
+ fn set_span(&mut self, span: Span);
+
+ // FIXME(eddyb) replace uses of this with `append_sibling_block`.
+ fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;
+
+ fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;
+
+ fn switch_to_block(&mut self, llbb: Self::BasicBlock);
+
+ fn ret_void(&mut self);
+ fn ret(&mut self, v: Self::Value);
+ fn br(&mut self, dest: Self::BasicBlock);
+ fn cond_br(
+ &mut self,
+ cond: Self::Value,
+ then_llbb: Self::BasicBlock,
+ else_llbb: Self::BasicBlock,
+ );
+ fn switch(
+ &mut self,
+ v: Self::Value,
+ else_llbb: Self::BasicBlock,
+ cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
+ );
+ fn invoke(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ then: Self::BasicBlock,
+ catch: Self::BasicBlock,
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn unreachable(&mut self);
+
+ fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn neg(&mut self, v: Self::Value) -> Self::Value;
+ fn fneg(&mut self, v: Self::Value) -> Self::Value;
+ fn not(&mut self, v: Self::Value) -> Self::Value;
+
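+    /// Performs a checked `lhs <op> rhs`, returning the computed value together
+    /// with a boolean flag indicating whether the operation overflowed.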
+ fn checked_binop(
+ &mut self,
+ oop: OverflowOp,
+ ty: Ty<'_>,
+ lhs: Self::Value,
+ rhs: Self::Value,
+ ) -> (Self::Value, Self::Value);
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
+ fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
+ if let Abi::Scalar(scalar) = layout.abi {
+ self.to_immediate_scalar(val, scalar)
+ } else {
+ val
+ }
+ }
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
+
+ fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
+
+ fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+ fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+ fn atomic_load(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ ) -> Self::Value;
+ fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
+ -> OperandRef<'tcx, Self::Value>;
+
+    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor optimizable using memset.
+ fn write_operand_repeatedly(
+ self,
+ elem: OperandRef<'tcx, Self::Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, Self::Value>,
+ ) -> Self;
+
+ fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
+ fn nonnull_metadata(&mut self, load: Self::Value);
+
+ fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+ fn store_with_flags(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ ) -> Self::Value;
+ fn atomic_store(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ );
+
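+    // Address arithmetic in the style of LLVM's `getelementptr`: each of
+    // these computes a pointer offset from `ptr` by the given indices,
+    // without accessing memory.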
+ fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+ fn inbounds_gep(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ indices: &[Self::Value],
+ ) -> Self::Value;
+ fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
+
+ fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
+ fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
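+    /// Casts a float to an integer, saturating by default: out-of-range
+    /// values clamp to the destination type's minimum/maximum and NaN maps
+    /// to zero. Saturation can be disabled via the unstable
+    /// `-Z saturating-float-casts=false` option (checked in the body below).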
+ fn cast_float_to_int(
+ &mut self,
+ signed: bool,
+ x: Self::Value,
+ dest_ty: Self::Type,
+ ) -> Self::Value {
+ let in_ty = self.cx().val_ty(x);
+ let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
+ && self.cx().type_kind(in_ty) == TypeKind::Vector
+ {
+ (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
+ } else {
+ (in_ty, dest_ty)
+ };
+ assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
+ assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);
+
+ if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
+ return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ }
+
+ let try_sat_result =
+ if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
+ if let Some(try_sat_result) = try_sat_result {
+ return try_sat_result;
+ }
+
+ let int_width = self.cx().int_width(int_ty);
+ let float_width = self.cx().float_width(float_ty);
+ // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
+ // destination integer type after rounding towards zero. This `undef` value can cause UB in
+ // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+ // Semantically, the mathematical value of the input is rounded towards zero to the next
+ // mathematical integer, and then the result is clamped into the range of the destination
+ // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+ // the destination integer type. NaN is mapped to 0.
+ //
+        // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
+ // a value representable in int_ty.
+ // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+ // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+ // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+ // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+        // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
+ // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+ // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+ // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+ // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+ let int_max = |signed: bool, int_width: u64| -> u128 {
+ let shift_amount = 128 - int_width;
+ if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+ };
+ let int_min = |signed: bool, int_width: u64| -> i128 {
+ if signed { i128::MIN >> (128 - int_width) } else { 0 }
+ };
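+        // Worked example: for i8 (signed, int_width == 8) the shift amount is
+        // 120, so int_max == (2^127 - 1) >> 120 == 127 == i8::MAX and
+        // int_min == (-2^127) >> 120 == -128 == i8::MIN.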
+
+ let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ // To implement saturation, we perform the following steps:
+ //
+ // 1. Cast x to an integer with fpto[su]i. This may result in undef.
+ // 2. Compare x to f_min and f_max, and use the comparison results to select:
+ // a) int_ty::MIN if x < f_min or x is NaN
+ // b) int_ty::MAX if x > f_max
+ // c) the result of fpto[su]i otherwise
+        // 3. If x is NaN, return 0; otherwise return the result of step 2.
+ //
+        // This avoids resulting in undef because values in range [f_min, f_max] by definition fit into the
+ // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
+ // undef does not introduce any non-determinism either.
+ // More importantly, the above procedure correctly implements saturating conversion.
+ // Proof (sketch):
+ // If x is NaN, 0 is returned by definition.
+ // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
+ // This yields three cases to consider:
+ // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+ // saturating conversion for inputs in that range.
+ // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
+ // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+ // than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
+ // is correct.
+ // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+ // int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+ // QED.
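+        // Concretely, these are the semantics of Rust's saturating `as`
+        // float-to-int casts, e.g. 300.0f32 as u8 == 255, -1.0f32 as u8 == 0,
+        // f32::NAN as i32 == 0, and f32::INFINITY as i32 == i32::MAX.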
+
+ let float_bits_to_llval = |bx: &mut Self, bits| {
+ let bits_llval = match float_width {
+ 32 => bx.cx().const_u32(bits as u32),
+ 64 => bx.cx().const_u64(bits as u64),
+ n => bug!("unsupported float width {}", n),
+ };
+ bx.bitcast(bits_llval, float_ty)
+ };
+ let (f_min, f_max) = match float_width {
+ 32 => compute_clamp_bounds_single(signed, int_width),
+ 64 => compute_clamp_bounds_double(signed, int_width),
+ n => bug!("unsupported float width {}", n),
+ };
+ let f_min = float_bits_to_llval(self, f_min);
+ let f_max = float_bits_to_llval(self, f_max);
+ let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
+ let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
+ let zero = self.cx().const_uint(int_ty, 0);
+
+ // If we're working with vectors, constants must be "splatted": the constant is duplicated
+ // into each lane of the vector. The algorithm stays the same, we are just using the
+ // same constant across all lanes.
+ let maybe_splat = |bx: &mut Self, val| {
+ if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+ bx.vector_splat(bx.vector_length(dest_ty), val)
+ } else {
+ val
+ }
+ };
+ let f_min = maybe_splat(self, f_min);
+ let f_max = maybe_splat(self, f_max);
+ let int_max = maybe_splat(self, int_max);
+ let int_min = maybe_splat(self, int_min);
+ let zero = maybe_splat(self, zero);
+
+ // Step 1 ...
+ let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
+ let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);
+
+ // Step 2: We use two comparisons and two selects, with %s1 being the
+ // result:
+ // %less_or_nan = fcmp ult %x, %f_min
+        //     %greater = fcmp ogt %x, %f_max
+ // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+ // %s1 = select %greater, int_ty::MAX, %s0
+ // Note that %less_or_nan uses an *unordered* comparison. This
+ // comparison is true if the operands are not comparable (i.e., if x is
+ // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+ // x is NaN.
+ //
+ // Performance note: Unordered comparison can be lowered to a "flipped"
+ // comparison and a negation, and the negation can be merged into the
+        // select. Therefore, it is not necessarily any more expensive than an
+ // ordered ("normal") comparison. Whether these optimizations will be
+ // performed is ultimately up to the backend, but at least x86 does
+ // perform them.
+ let s0 = self.select(less_or_nan, int_min, fptosui_result);
+ let s1 = self.select(greater, int_max, s0);
+
+ // Step 3: NaN replacement.
+ // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+ // Therefore we only need to execute this step for signed integer types.
+ if signed {
+            // LLVM has no isNaN predicate, so we use (x == x) instead, which is true iff x is not NaN
+ let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
+ self.select(cmp, s1, zero)
+ } else {
+ s1
+ }
+ }
+
+ fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+
+ fn memcpy(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memmove(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memset(
+ &mut self,
+ ptr: Self::Value,
+ fill_byte: Self::Value,
+ size: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ );
+
+ fn select(
+ &mut self,
+ cond: Self::Value,
+ then_val: Self::Value,
+ else_val: Self::Value,
+ ) -> Self::Value;
+
+ fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
+ fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+ fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+ fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+ fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
+
+ fn set_personality_fn(&mut self, personality: Self::Value);
+
+    // These are used by everyone except MSVC
+ fn cleanup_landing_pad(&mut self, ty: Self::Type, pers_fn: Self::Value) -> Self::Value;
+ fn resume(&mut self, exn: Self::Value);
+
+    // These are used only by MSVC
+ fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
+ fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
+ fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
+ fn catch_switch(
+ &mut self,
+ parent: Option<Self::Value>,
+ unwind: Option<Self::BasicBlock>,
+ handlers: &[Self::BasicBlock],
+ ) -> Self::Value;
+
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: Self::Value,
+ cmp: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ failure_order: AtomicOrdering,
+ weak: bool,
+ ) -> Self::Value;
+ fn atomic_rmw(
+ &mut self,
+ op: AtomicRmwBinOp,
+ dst: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ ) -> Self::Value;
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
+ fn set_invariant_load(&mut self, load: Self::Value);
+
+ /// Called for `StorageLive`
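+    /// (in the LLVM backend this emits the `llvm.lifetime.start` intrinsic).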
+ fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
+
+ /// Called for `StorageDead`
+ fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
+
+ fn instrprof_increment(
+ &mut self,
+ fn_name: Self::Value,
+ hash: Self::Value,
+ num_counters: Self::Value,
+ index: Self::Value,
+ );
+
+ fn call(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+ fn do_not_inline(&mut self, llret: Self::Value);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
new file mode 100644
index 000000000..8a91d4735
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -0,0 +1,41 @@
+use super::BackendTypes;
+use crate::mir::place::PlaceRef;
+use rustc_middle::mir::interpret::{ConstAllocation, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{self, Size};
+
+pub trait ConstMethods<'tcx>: BackendTypes {
+ // Constant constructors
+ fn const_null(&self, t: Self::Type) -> Self::Value;
+ fn const_undef(&self, t: Self::Type) -> Self::Value;
+ fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+ fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+ fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+ fn const_bool(&self, val: bool) -> Self::Value;
+ fn const_i16(&self, i: i16) -> Self::Value;
+ fn const_i32(&self, i: i32) -> Self::Value;
+ fn const_u32(&self, i: u32) -> Self::Value;
+ fn const_u64(&self, i: u64) -> Self::Value;
+ fn const_usize(&self, i: u64) -> Self::Value;
+ fn const_u8(&self, i: u8) -> Self::Value;
+ fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
+
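+    /// Materializes a string constant; returns the pointer to the string data
+    /// together with its length in bytes.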
+ fn const_str(&self, s: &str) -> (Self::Value, Self::Value);
+ fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
+
+ fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
+ fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value;
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
+ fn zst_to_backend(&self, llty: Self::Type) -> Self::Value;
+ fn from_const_alloc(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ alloc: ConstAllocation<'tcx>,
+ offset: Size,
+ ) -> PlaceRef<'tcx, Self::Value>;
+
+ fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
new file mode 100644
index 000000000..e77201cf0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -0,0 +1,57 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::ty::Instance;
+
+pub trait CoverageInfoMethods<'tcx>: BackendTypes {
+ fn coverageinfo_finalize(&self);
+
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented, that gives LLVM coverage tools a
+ /// function definition it needs in order to resolve coverage map references
+ /// to unused functions. This is necessary so unused functions will appear
+ /// as uncovered (coverage execution count `0`) in LLVM coverage reports.
+ fn define_unused_fn(&self, def_id: DefId);
+
+ /// For LLVM codegen, returns a function-specific `Value` for a global
+ /// string, to hold the function name passed to LLVM intrinsic
+ /// `instrprof.increment()`. The `Value` is only created once per instance.
+ /// Multiple invocations with the same instance return the same `Value`.
+ fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
+}
+
+pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
+ /// Returns true if the function source hash was added to the coverage map (even if it had
+ /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
+ /// not enabled (a coverage map is not being generated).
+ fn set_function_source_hash(
+ &mut self,
+ instance: Instance<'tcx>,
+ function_source_hash: u64,
+ ) -> bool;
+
+ /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter(
+ &mut self,
+ instance: Instance<'tcx>,
+ index: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool;
+
+ /// Returns true if the expression was added to the coverage map; false if
+ /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) -> bool;
+
+ /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
new file mode 100644
index 000000000..f310789d1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -0,0 +1,79 @@
+use super::BackendTypes;
+use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::Size;
+
+pub trait DebugInfoMethods<'tcx>: BackendTypes {
+ fn create_vtable_debuginfo(
+ &self,
+ ty: Ty<'tcx>,
+ trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+ vtable: Self::Value,
+ );
+
+ /// Creates the function-specific debug context.
+ ///
+ /// Returns the FunctionDebugContext for the function which holds state needed
+ /// for debug info creation, if it is enabled.
+ fn create_function_debug_context(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ llfn: Self::Function,
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<Self::Function>,
+ ) -> Self::DIScope;
+
+ fn dbg_loc(
+ &self,
+ scope: Self::DIScope,
+ inlined_at: Option<Self::DILocation>,
+ span: Span,
+ ) -> Self::DILocation;
+
+ fn extend_scope_to_file(
+ &self,
+ scope_metadata: Self::DIScope,
+ file: &SourceFile,
+ ) -> Self::DIScope;
+ fn debuginfo_finalize(&self);
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn create_dbg_var(
+ &self,
+ variable_name: Symbol,
+ variable_type: Ty<'tcx>,
+ scope_metadata: Self::DIScope,
+ variable_kind: VariableKind,
+ span: Span,
+ ) -> Self::DIVariable;
+}
+
+pub trait DebugInfoBuilderMethods: BackendTypes {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(
+ &mut self,
+ dbg_var: Self::DIVariable,
+ dbg_loc: Self::DILocation,
+ variable_alloca: Self::Value,
+ direct_offset: Size,
+ // NB: each offset implies a deref (i.e. they're steps in a pointer chain).
+ indirect_offsets: &[Size],
+ );
+ fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation);
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
+ fn set_var_name(&mut self, value: Self::Value, name: &str);
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/declare.rs b/compiler/rustc_codegen_ssa/src/traits/declare.rs
new file mode 100644
index 000000000..655afcd17
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/declare.rs
@@ -0,0 +1,21 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::Instance;
+
+pub trait PreDefineMethods<'tcx>: BackendTypes {
+ fn predefine_static(
+ &self,
+ def_id: DefId,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+ fn predefine_fn(
+ &self,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
new file mode 100644
index 000000000..7755e6793
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -0,0 +1,39 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_target::abi::call::FnAbi;
+
+pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
+ /// Remember to add all intrinsics here, in `compiler/rustc_typeck/src/check/mod.rs`,
+ /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
+ /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+ fn codegen_intrinsic_call(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Self::Value>],
+ llresult: Self::Value,
+ span: Span,
+ );
+
+ fn abort(&mut self);
+ fn assume(&mut self, val: Self::Value);
+ fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
+ /// Trait method used to test whether a given pointer is associated with a type identifier.
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value;
+ /// Trait method used to load a function while testing if it is associated with a type
+ /// identifier.
+ fn type_checked_load(
+ &mut self,
+ llvtable: Self::Value,
+ vtable_byte_offset: u64,
+ typeid: Self::Value,
+ ) -> Self::Value;
+ /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
+ /// Rust defined C-variadic functions.
+ fn va_start(&mut self, val: Self::Value) -> Self::Value;
+ /// Trait method used to inject `va_end` on the "spoofed" `VaListImpl` before
+ /// Rust defined C-variadic functions return.
+ fn va_end(&mut self, val: Self::Value) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/misc.rs b/compiler/rustc_codegen_ssa/src/traits/misc.rs
new file mode 100644
index 000000000..4266e42ec
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/misc.rs
@@ -0,0 +1,26 @@
+use super::BackendTypes;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_session::Session;
+use std::cell::RefCell;
+
+pub trait MiscMethods<'tcx>: BackendTypes {
+ fn vtables(
+ &self,
+ ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Self::Value>>;
+ fn check_overflow(&self) -> bool;
+ fn get_fn(&self, instance: Instance<'tcx>) -> Self::Function;
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> Self::Value;
+ fn eh_personality(&self) -> Self::Value;
+ fn sess(&self) -> &Session;
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
+ fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn compiler_used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn set_frame_pointer_type(&self, llfn: Self::Function);
+ fn apply_target_cpu_attr(&self, llfn: Self::Function);
+ fn create_used_variable(&self);
+ fn create_compiler_used_variable(&self);
+    /// Declares the `extern "C"` main function for the entry point. Returns
+    /// `None` if the symbol already exists.
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
new file mode 100644
index 000000000..782fdadbf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -0,0 +1,102 @@
+//! Interface of a Rust codegen backend
+//!
+//! This crate defines all the traits that have to be implemented by a codegen backend in order to
+//! use the backend-agnostic codegen code in `rustc_codegen_ssa`.
+//!
+//! The interface is designed around two backend-specific data structures, the codegen context and
+//! the builder. The codegen context is supposed to be read-only after its creation and during the
+//! actual codegen, while the builder stores the information about the function during codegen and
+//! is used to produce the instructions of the backend IR.
+//!
+//! Finally, a third `Backend` structure has to implement methods related to how codegen information
+//! is passed to the backend, especially for asynchronous compilation.
+//!
+//! The traits contain associated types that are backend-specific, such as the backend's value or
+//! basic blocks.
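+//!
+//! As a rough, hypothetical sketch (`MyBackend` and the `my_ir` types are
+//! invented for illustration), a backend wires its IR types into these
+//! traits via the `BackendTypes` associated types:
+//!
+//! ```ignore (illustrative sketch, not a real backend)
+//! impl BackendTypes for MyBackend {
+//!     type Value = my_ir::Value;
+//!     type Function = my_ir::Function;
+//!     type BasicBlock = my_ir::Block;
+//!     type Type = my_ir::Type;
+//!     type Funclet = my_ir::Funclet;
+//!     type DIScope = my_ir::DIScope;
+//!     type DILocation = my_ir::DILocation;
+//!     type DIVariable = my_ir::DIVariable;
+//! }
+//! ```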
+
+mod abi;
+mod asm;
+mod backend;
+mod builder;
+mod consts;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod misc;
+mod statics;
+mod type_;
+mod write;
+
+pub use self::abi::AbiBuilderMethods;
+pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::builder::{BuilderMethods, OverflowOp};
+pub use self::consts::ConstMethods;
+pub use self::coverageinfo::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
+pub use self::declare::PreDefineMethods;
+pub use self::intrinsic::IntrinsicCallMethods;
+pub use self::misc::MiscMethods;
+pub use self::statics::{StaticBuilderMethods, StaticMethods};
+pub use self::type_::{
+ ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMembershipMethods,
+ TypeMethods,
+};
+pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt};
+use rustc_target::spec::HasTargetSpec;
+
+use std::fmt;
+
+pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
+impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
+
+pub trait CodegenMethods<'tcx>:
+ Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+impl<'tcx, T> CodegenMethods<'tcx> for T where
+ Self: Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+pub trait HasCodegen<'tcx>:
+ Backend<'tcx> + std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+{
+ type CodegenCx: CodegenMethods<'tcx>
+ + BackendTypes<
+ Value = Self::Value,
+ Function = Self::Function,
+ BasicBlock = Self::BasicBlock,
+ Type = Self::Type,
+ Funclet = Self::Funclet,
+ DIScope = Self::DIScope,
+ DILocation = Self::DILocation,
+ DIVariable = Self::DIVariable,
+ >;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/statics.rs b/compiler/rustc_codegen_ssa/src/traits/statics.rs
new file mode 100644
index 000000000..413d31bb9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/statics.rs
@@ -0,0 +1,24 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_target::abi::Align;
+
+pub trait StaticMethods: BackendTypes {
+ fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool);
+
+ /// Mark the given global value as "used", to prevent the compiler and linker from potentially
+ /// removing a static variable that may otherwise appear unused.
+ fn add_used_global(&self, global: Self::Value);
+
+    /// Same as `add_used_global()`, but only prevents the compiler from potentially removing an
+    /// otherwise unused symbol. The linker is still permitted to drop it.
+ ///
+ /// This corresponds to the documented semantics of the `#[used]` attribute, although
+ /// on some targets (non-ELF), we may use `add_used_global` for `#[used]` statics
+ /// instead.
+ fn add_compiler_used_global(&self, global: Self::Value);
+}
+
+pub trait StaticBuilderMethods: BackendTypes {
+ fn get_static(&mut self, def_id: DefId) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
new file mode 100644
index 000000000..8158e8dd0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -0,0 +1,151 @@
+use super::misc::MiscMethods;
+use super::Backend;
+use super::HasCodegen;
+use crate::common::TypeKind;
+use crate::mir::place::PlaceRef;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Integer};
+
+// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
+// `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves.
+pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
+ fn type_i1(&self) -> Self::Type;
+ fn type_i8(&self) -> Self::Type;
+ fn type_i16(&self) -> Self::Type;
+ fn type_i32(&self) -> Self::Type;
+ fn type_i64(&self) -> Self::Type;
+ fn type_i128(&self) -> Self::Type;
+ fn type_isize(&self) -> Self::Type;
+
+ fn type_f32(&self) -> Self::Type;
+ fn type_f64(&self) -> Self::Type;
+
+ fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+ fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+ fn type_kind(&self, ty: Self::Type) -> TypeKind;
+ fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
+ fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+ fn element_type(&self, ty: Self::Type) -> Self::Type;
+
+    /// Returns the number of elements in `self` if it is an LLVM vector type.
+ fn vector_length(&self, ty: Self::Type) -> usize;
+
+ fn float_width(&self, ty: Self::Type) -> usize;
+
+ /// Retrieves the bit width of the integer type `self`.
+ fn int_width(&self, ty: Self::Type) -> u64;
+
+ fn val_ty(&self, v: Self::Value) -> Self::Type;
+}
+
+pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
+ fn type_i8p(&self) -> Self::Type {
+ self.type_i8p_ext(AddressSpace::DATA)
+ }
+
+ fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
+ self.type_ptr_to_ext(self.type_i8(), address_space)
+ }
+
+ fn type_int(&self) -> Self::Type {
+ match &self.sess().target.c_int_width[..] {
+ "16" => self.type_i16(),
+ "32" => self.type_i32(),
+ "64" => self.type_i64(),
+ width => bug!("Unsupported c_int_width: {}", width),
+ }
+ }
+
+ fn type_from_integer(&self, i: Integer) -> Self::Type {
+ use Integer::*;
+ match i {
+ I8 => self.type_i8(),
+ I16 => self.type_i16(),
+ I32 => self.type_i32(),
+ I64 => self.type_i64(),
+ I128 => self.type_i128(),
+ }
+ }
+
+ fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
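+    /// Whether pointers to `ty` must carry metadata, i.e. whether they are
+    /// fat pointers: e.g. `&[u8]` and `&str` carry a length and `&dyn Trait`
+    /// a vtable pointer, while `&u8` is a thin pointer.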
+ fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
+ let param_env = ty::ParamEnv::reveal_all();
+ if ty.is_sized(self.tcx().at(DUMMY_SP), param_env) {
+ return false;
+ }
+
+ let tail = self.tcx().struct_tail_erasing_lifetimes(ty, param_env);
+ match tail.kind() {
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+ _ => bug!("unexpected unsized tail: {:?}", tail),
+ }
+ }
+}
+
+impl<'tcx, T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
+
+pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64;
+ fn scalar_pair_element_backend_type(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> Self::Type;
+}
+
+// For backends that support CFI using type membership (i.e., testing whether a given pointer is
+// associated with a type identifier).
+pub trait TypeMembershipMethods<'tcx>: Backend<'tcx> {
+ fn set_type_metadata(&self, function: Self::Function, typeid: String);
+ fn typeid_metadata(&self, typeid: String) -> Self::Value;
+}
+
+pub trait ArgAbiMethods<'tcx>: HasCodegen<'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: Self::Value,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+}
+
+pub trait TypeMethods<'tcx>:
+ DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
+
+impl<'tcx, T> TypeMethods<'tcx> for T where
+ Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
new file mode 100644
index 000000000..e54ec34f1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -0,0 +1,68 @@
+use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::{CompiledModule, ModuleCodegen};
+
+use rustc_errors::{FatalError, Handler};
+use rustc_middle::dep_graph::WorkProduct;
+
+pub trait WriteBackendMethods: 'static + Sized + Clone {
+ type Module: Send + Sync;
+ type TargetMachine;
+ type ModuleBuffer: ModuleBufferMethods;
+ type Context: ?Sized;
+ type ThinData: Send + Sync;
+ type ThinBuffer: ThinBufferMethods;
+
+    /// Merges all modules into one main module and returns it.
+ fn run_link(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ modules: Vec<ModuleCodegen<Self::Module>>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ /// Performs fat LTO by merging all modules into a single one and returning it
+ /// for further optimization.
+ fn run_fat_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<FatLTOInput<Self>>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<LtoModuleCodegen<Self>, FatalError>;
+    /// Performs thin LTO by running the necessary global analysis and returning two
+    /// lists: one of the modules that need optimization, and another of modules that
+    /// can simply be copied over from the incremental compilation cache.
+ fn run_thin_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<(String, Self::ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+ fn print_pass_timings(&self);
+ unsafe fn optimize(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<(), FatalError>;
+ fn optimize_fat(
+ cgcx: &CodegenContext<Self>,
+ llmod: &mut ModuleCodegen<Self::Module>,
+ ) -> Result<(), FatalError>;
+ unsafe fn optimize_thin(
+ cgcx: &CodegenContext<Self>,
+ thin: ThinModule<Self>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ unsafe fn codegen(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<CompiledModule, FatalError>;
+ fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer);
+ fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer);
+}
+
+pub trait ThinBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}
+
+pub trait ModuleBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}