Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs               92
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs             48
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/metadata.rs           96
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath.rs              38
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath/tests.rs        35
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/symbol_export.rs      59
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs             359
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs                   103
-rw-r--r--  compiler/rustc_codegen_ssa/src/codegen_attrs.rs           36
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs    60
-rw-r--r--  compiler/rustc_codegen_ssa/src/errors.rs                  28
-rw-r--r--  compiler/rustc_codegen_ssa/src/lib.rs                      2
-rw-r--r--  compiler/rustc_codegen_ssa/src/meth.rs                     6
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs              114
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs           98
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs           52
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs                  2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs             33
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs               53
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs              39
-rw-r--r--  compiler/rustc_codegen_ssa/src/mono_item.rs                8
-rw-r--r--  compiler/rustc_codegen_ssa/src/target_features.rs         62
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs          29
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/consts.rs            7
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/mod.rs               4
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/type_.rs            12
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/write.rs             5
27 files changed, 753 insertions, 727 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index b603a8787..a7ac728c5 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -12,8 +12,8 @@ use rustc_metadata::fs::{copy_to_stdout, emit_wrapper_file, METADATA_FILENAME};
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, Strip};
-use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, OutFileName, Strip};
+use rustc_session::config::{OutputFilenames, OutputType, PrintKind, SplitDwarfKind};
use rustc_session::cstore::DllImport;
use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
use rustc_session::search_paths::PathKind;
@@ -69,7 +69,7 @@ pub fn link_binary<'a>(
let _timer = sess.timer("link_binary");
let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
let mut tempfiles_for_stdout_output: Vec<PathBuf> = Vec::new();
- for &crate_type in sess.crate_types().iter() {
+ for &crate_type in &codegen_results.crate_info.crate_types {
// Ignore executable crates if we have -Z no-codegen, as they will error.
if (sess.opts.unstable_opts.no_codegen || !sess.opts.output_types.should_codegen())
&& !output_metadata
@@ -596,8 +596,10 @@ fn link_staticlib<'a>(
all_native_libs.extend_from_slice(&codegen_results.crate_info.used_libraries);
- if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
- print_native_static_libs(sess, &all_native_libs, &all_rust_dylibs);
+ for print in &sess.opts.prints {
+ if print.kind == PrintKind::NativeStaticLibs {
+ print_native_static_libs(sess, &print.out, &all_native_libs, &all_rust_dylibs);
+ }
}
Ok(())
@@ -744,8 +746,11 @@ fn link_natively<'a>(
cmd.env_remove(k.as_ref());
}
- if sess.opts.prints.contains(&PrintRequest::LinkArgs) {
- println!("{:?}", &cmd);
+ for print in &sess.opts.prints {
+ if print.kind == PrintKind::LinkArgs {
+ let content = format!("{cmd:?}");
+ print.out.overwrite(&content, sess);
+ }
}
// May have not found libraries in the right formats.
@@ -1231,22 +1236,21 @@ fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
}
}
- let channel = option_env!("CFG_RELEASE_CHANNEL")
- .map(|channel| format!("-{}", channel))
- .unwrap_or_default();
+ let channel =
+ option_env!("CFG_RELEASE_CHANNEL").map(|channel| format!("-{channel}")).unwrap_or_default();
if sess.target.is_like_osx {
// On Apple platforms, the sanitizer is always built as a dylib, and
// LLVM will link to `@rpath/*.dylib`, so we need to specify an
// rpath to the library as well (the rpath should be absolute, see
// PR #41352 for details).
- let filename = format!("rustc{}_rt.{}", channel, name);
+ let filename = format!("rustc{channel}_rt.{name}");
let path = find_sanitizer_runtime(&sess, &filename);
let rpath = path.to_str().expect("non-utf8 component in path");
linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
linker.link_dylib(&filename, false, true);
} else {
- let filename = format!("librustc{}_rt.{}.a", channel, name);
+ let filename = format!("librustc{channel}_rt.{name}.a");
let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
linker.link_whole_rlib(&path);
}
@@ -1386,12 +1390,18 @@ enum RlibFlavor {
fn print_native_static_libs(
sess: &Session,
+ out: &OutFileName,
all_native_libs: &[NativeLib],
all_rust_dylibs: &[&Path],
) {
let mut lib_args: Vec<_> = all_native_libs
.iter()
.filter(|l| relevant_lib(sess, l))
+ // Deduplication of successive repeated libraries, see rust-lang/rust#113209
+ //
+ // note: we don't use PartialEq/Eq because NativeLib transitively depends on local
+ // elements like spans, which we don't care about and which would make deduplication impossible
+ .dedup_by(|l1, l2| l1.name == l2.name && l1.kind == l2.kind && l1.verbatim == l2.verbatim)
.filter_map(|lib| {
let name = lib.name;
match lib.kind {
@@ -1404,12 +1414,12 @@ fn print_native_static_libs(
} else if sess.target.linker_flavor.is_gnu() {
Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
} else {
- Some(format!("-l{}", name))
+ Some(format!("-l{name}"))
}
}
NativeLibKind::Framework { .. } => {
// ld-only syntax, since there are no frameworks in MSVC
- Some(format!("-framework {}", name))
+ Some(format!("-framework {name}"))
}
// These are included, no need to print them
NativeLibKind::Static { bundle: None | Some(true), .. }
@@ -1446,19 +1456,30 @@ fn print_native_static_libs(
// `foo.lib` file if the dll doesn't actually export any symbols, so we
// check to see if the file is there and just omit linking to it if it's
// not present.
- let name = format!("{}.dll.lib", lib);
+ let name = format!("{lib}.dll.lib");
if path.join(&name).exists() {
lib_args.push(name);
}
} else {
- lib_args.push(format!("-l{}", lib));
+ lib_args.push(format!("-l{lib}"));
}
}
- if !lib_args.is_empty() {
- sess.emit_note(errors::StaticLibraryNativeArtifacts);
- // Prefix for greppability
- // Note: This must not be translated as tools are allowed to depend on this exact string.
- sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
+
+ match out {
+ OutFileName::Real(path) => {
+ out.overwrite(&lib_args.join(" "), sess);
+ if !lib_args.is_empty() {
+ sess.emit_note(errors::StaticLibraryNativeArtifactsToFile { path });
+ }
+ }
+ OutFileName::Stdout => {
+ if !lib_args.is_empty() {
+ sess.emit_note(errors::StaticLibraryNativeArtifacts);
+ // Prefix for greppability
+ // Note: This must not be translated as tools are allowed to depend on this exact string.
+ sess.note_without_error(format!("native-static-libs: {}", &lib_args.join(" ")));
+ }
+ }
}
}
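
The `dedup_by` call introduced above comes from itertools and only collapses consecutive duplicates, which is exactly what rust-lang/rust#113209 needs: the same native library listed several times in a row. A minimal sketch of that behaviour, assuming the itertools crate is available:

use itertools::Itertools;

fn main() {
    let libs = ["gcc", "gcc", "m", "gcc"];
    let deduped: Vec<_> = libs.iter().dedup_by(|a, b| a == b).collect();
    // Only successive repeats are removed; the later "gcc" survives.
    assert_eq!(deduped, [&"gcc", &"m", &"gcc"]);
}
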
@@ -1606,8 +1627,8 @@ fn exec_linker(
write!(f, "\"")?;
for c in self.arg.chars() {
match c {
- '"' => write!(f, "\\{}", c)?,
- c => write!(f, "{}", c)?,
+ '"' => write!(f, "\\{c}")?,
+ c => write!(f, "{c}")?,
}
}
write!(f, "\"")?;
@@ -1624,8 +1645,8 @@ fn exec_linker(
// ensure the line is interpreted as one whole argument.
for c in self.arg.chars() {
match c {
- '\\' | ' ' => write!(f, "\\{}", c)?,
- c => write!(f, "{}", c)?,
+ '\\' | ' ' => write!(f, "\\{c}")?,
+ c => write!(f, "{c}")?,
}
}
}
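
The two hunks above touch `Display` impls that `exec_linker` uses when spilling an overly long command line into an @response-file: the first style wraps the argument in double quotes and escapes embedded quotes, the second escapes backslashes and spaces instead. A standalone sketch of the first style, with an illustrative `Quoted` wrapper that is not part of the diff:

use std::fmt;

// Quote an argument for a response file, escaping embedded double quotes.
struct Quoted<'a>(&'a str);

impl fmt::Display for Quoted<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "\"")?;
        for c in self.0.chars() {
            match c {
                '"' => write!(f, "\\{c}")?,
                c => write!(f, "{c}")?,
            }
        }
        write!(f, "\"")
    }
}

fn main() {
    assert_eq!(Quoted(r#"C:\path with "quotes""#).to_string(), r#""C:\path with \"quotes\"""#);
}
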
@@ -2262,7 +2283,7 @@ fn add_order_independent_options(
} else {
""
};
- cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
+ cmd.arg(format!("--dynamic-linker={prefix}ld.so.1"));
}
if sess.target.eh_frame_header {
@@ -2970,25 +2991,10 @@ fn add_lld_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
return;
}
- let self_contained_linker = sess.opts.cg.link_self_contained.linker();
-
- // FIXME: some targets default to using `lld`, but users can only override the linker on the CLI
- // and cannot yet select the precise linker flavor to opt out of that. See for example issue
- // #113597 for the `thumbv6m-none-eabi` target: a driver is used, and its default linker
- // conflicts with the target's flavor, causing unexpected arguments being passed.
- //
- // Until the new `LinkerFlavor`-like CLI options are stabilized, we only adopt MCP510's behavior
- // if its dedicated unstable CLI flags are used, to keep the current sub-optimal stable
- // behavior.
- let using_mcp510 =
- self_contained_linker || sess.opts.cg.linker_flavor.is_some_and(|f| f.is_unstable());
- if !using_mcp510 && !unstable_use_lld {
- return;
- }
-
// 1. Implement the "self-contained" part of this feature by adding rustc distribution
// directories to the tool's search path.
- if self_contained_linker || unstable_use_lld {
+ let self_contained_linker = sess.opts.cg.link_self_contained.linker() || unstable_use_lld;
+ if self_contained_linker {
for path in sess.get_tools_search_paths(false) {
cmd.arg({
let mut arg = OsString::from("-B");
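
The tail of this hunk (cut off at the context boundary above) keeps appending the rustc distribution's tool directories as `-B<path>` arguments to the driver. A hedged, standalone sketch of that pattern; the `dash_b_arg` helper is illustrative, not part of the diff:

use std::ffi::OsString;
use std::path::Path;

// Build a "-B<dir>" argument without forcing the path through UTF-8.
fn dash_b_arg(path: &Path) -> OsString {
    let mut arg = OsString::from("-B");
    arg.push(path); // OsString::push appends the raw OS path
    arg
}
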
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 8ac86fa4b..11afe0fbc 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -310,7 +310,7 @@ impl<'a> GccLinker<'a> {
self.linker_arg(&format!("-plugin-opt=sample-profile={}", path.display()));
};
self.linker_args(&[
- &format!("-plugin-opt={}", opt_level),
+ &format!("-plugin-opt={opt_level}"),
&format!("-plugin-opt=mcpu={}", self.target_cpu),
]);
}
@@ -488,7 +488,7 @@ impl<'a> Linker for GccLinker<'a> {
fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_framework(&mut self, framework: &str, as_needed: bool) {
@@ -670,8 +670,8 @@ impl<'a> Linker for GccLinker<'a> {
let res: io::Result<()> = try {
let mut f = BufWriter::new(File::create(&path)?);
for sym in symbols {
- debug!(" _{}", sym);
- writeln!(f, "_{}", sym)?;
+ debug!(" _{sym}");
+ writeln!(f, "_{sym}")?;
}
};
if let Err(error) = res {
@@ -685,8 +685,8 @@ impl<'a> Linker for GccLinker<'a> {
// because LD doesn't like when it's empty
writeln!(f, "EXPORTS")?;
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(error) = res {
@@ -700,8 +700,8 @@ impl<'a> Linker for GccLinker<'a> {
if !symbols.is_empty() {
writeln!(f, " global:")?;
for sym in symbols {
- debug!(" {};", sym);
- writeln!(f, " {};", sym)?;
+ debug!(" {sym};");
+ writeln!(f, " {sym};")?;
}
}
writeln!(f, "\n local:\n *;\n}};")?;
@@ -836,7 +836,7 @@ impl<'a> Linker for MsvcLinker<'a> {
// `foo.lib` file if the dll doesn't actually export any symbols, so we
// check to see if the file is there and just omit linking to it if it's
// not present.
- let name = format!("{}.dll.lib", lib);
+ let name = format!("{lib}.dll.lib");
if path.join(&name).exists() {
self.cmd.arg(name);
}
@@ -976,8 +976,8 @@ impl<'a> Linker for MsvcLinker<'a> {
writeln!(f, "LIBRARY")?;
writeln!(f, "EXPORTS")?;
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(error) = res {
@@ -991,7 +991,7 @@ impl<'a> Linker for MsvcLinker<'a> {
fn subsystem(&mut self, subsystem: &str) {
// Note that previous passes of the compiler validated this subsystem,
// so we just blindly pass it to the linker.
- self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+ self.cmd.arg(&format!("/SUBSYSTEM:{subsystem}"));
// Windows has two subsystems we're interested in right now, the console
// and windows subsystems. These both implicitly have different entry
@@ -1146,7 +1146,7 @@ impl<'a> Linker for EmLinker<'a> {
&symbols.iter().map(|sym| "_".to_owned() + sym).collect::<Vec<_>>(),
)
.unwrap();
- debug!("{}", encoded);
+ debug!("{encoded}");
arg.push(encoded);
@@ -1349,7 +1349,7 @@ impl<'a> Linker for L4Bender<'a> {
}
fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-PC{}", lib));
+ self.cmd.arg(format!("-PC{lib}"));
}
fn link_rlib(&mut self, lib: &Path) {
self.hint_static();
@@ -1398,7 +1398,7 @@ impl<'a> Linker for L4Bender<'a> {
fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
self.hint_static();
- self.cmd.arg("--whole-archive").arg(format!("-l{}", lib));
+ self.cmd.arg("--whole-archive").arg(format!("-l{lib}"));
self.cmd.arg("--no-whole-archive");
}
@@ -1452,7 +1452,7 @@ impl<'a> Linker for L4Bender<'a> {
}
fn subsystem(&mut self, subsystem: &str) {
- self.cmd.arg(&format!("--subsystem {}", subsystem));
+ self.cmd.arg(&format!("--subsystem {subsystem}"));
}
fn reset_per_library_state(&mut self) {
@@ -1517,12 +1517,12 @@ impl<'a> AixLinker<'a> {
impl<'a> Linker for AixLinker<'a> {
fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_rlib(&mut self, lib: &Path) {
@@ -1572,7 +1572,7 @@ impl<'a> Linker for AixLinker<'a> {
fn link_rust_dylib(&mut self, lib: &str, _: &Path) {
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{lib}"));
}
fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
@@ -1625,12 +1625,12 @@ impl<'a> Linker for AixLinker<'a> {
let mut f = BufWriter::new(File::create(&path)?);
// FIXME: use llvm-nm to generate export list.
for symbol in symbols {
- debug!(" _{}", symbol);
- writeln!(f, " {}", symbol)?;
+ debug!(" _{symbol}");
+ writeln!(f, " {symbol}")?;
}
};
if let Err(e) = res {
- self.sess.fatal(format!("failed to write export file: {}", e));
+ self.sess.fatal(format!("failed to write export file: {e}"));
}
self.cmd.arg(format!("-bE:{}", path.to_str().unwrap()));
}
@@ -1703,7 +1703,7 @@ fn exported_symbols_for_proc_macro_crate(tcx: TyCtxt<'_>) -> Vec<String> {
return Vec::new();
}
- let stable_crate_id = tcx.sess.local_stable_crate_id();
+ let stable_crate_id = tcx.stable_crate_id(LOCAL_CRATE);
let proc_macro_decls_name = tcx.sess.generate_proc_macro_decls_symbol(stable_crate_id);
let metadata_symbol_name = exported_symbols::metadata_symbol_name(tcx);
@@ -1927,7 +1927,7 @@ impl<'a> Linker for BpfLinker<'a> {
let res: io::Result<()> = try {
let mut f = BufWriter::new(File::create(&path)?);
for sym in symbols {
- writeln!(f, "{}", sym)?;
+ writeln!(f, "{sym}")?;
}
};
if let Err(error) = res {
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 00e6acb5c..4c8547407 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -10,15 +10,13 @@ use object::{
ObjectSymbol, SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope,
};
-use snap::write::FrameEncoder;
-
-use object::elf::NT_GNU_PROPERTY_TYPE_0;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::owned_slice::{try_slice_owned, OwnedSlice};
use rustc_metadata::fs::METADATA_FILENAME;
use rustc_metadata::EncodedMetadata;
use rustc_session::cstore::MetadataLoader;
use rustc_session::Session;
+use rustc_span::sym;
use rustc_target::abi::Endian;
use rustc_target::spec::{ef_avr_arch, RelocModel, Target};
@@ -124,7 +122,7 @@ fn add_gnu_property_note(
let mut data: Vec<u8> = Vec::new();
let n_namsz: u32 = 4; // Size of the n_name field
let n_descsz: u32 = 16; // Size of the n_desc field
- let n_type: u32 = NT_GNU_PROPERTY_TYPE_0; // Type of note descriptor
+ let n_type: u32 = object::elf::NT_GNU_PROPERTY_TYPE_0; // Type of note descriptor
let header_values = [n_namsz, n_descsz, n_type];
header_values.iter().for_each(|v| {
data.extend_from_slice(&match endianness {
@@ -134,8 +132,8 @@ fn add_gnu_property_note(
});
data.extend_from_slice(b"GNU\0"); // Owner of the program property note
let pr_type: u32 = match architecture {
- Architecture::X86_64 => 0xc0000002,
- Architecture::Aarch64 => 0xc0000000,
+ Architecture::X86_64 => object::elf::GNU_PROPERTY_X86_FEATURE_1_AND,
+ Architecture::Aarch64 => object::elf::GNU_PROPERTY_AARCH64_FEATURE_1_AND,
_ => unreachable!(),
};
let pr_datasz: u32 = 4; // Size of the pr_data field
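
As a reading aid, the note assembled in this hunk follows the standard ELF note layout: name size, descriptor size, type, owner name, then the descriptor itself. A little-endian-only sketch of the same bytes (the real code above dispatches on the target's endianness):

// Hypothetical standalone reconstruction of the payload built above.
fn gnu_property_note(pr_type: u32, pr_data: u32) -> Vec<u8> {
    let mut data = Vec::new();
    data.extend_from_slice(&4u32.to_le_bytes());  // n_namsz: length of "GNU\0"
    data.extend_from_slice(&16u32.to_le_bytes()); // n_descsz: length of the descriptor
    data.extend_from_slice(&5u32.to_le_bytes());  // n_type: NT_GNU_PROPERTY_TYPE_0
    data.extend_from_slice(b"GNU\0");             // owner of the note
    data.extend_from_slice(&pr_type.to_le_bytes()); // e.g. GNU_PROPERTY_X86_FEATURE_1_AND
    data.extend_from_slice(&4u32.to_le_bytes());    // pr_datasz
    data.extend_from_slice(&pr_data.to_le_bytes()); // the feature bits themselves
    data.extend_from_slice(&0u32.to_le_bytes());    // padding to 8-byte alignment
    data
}
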
@@ -161,20 +159,19 @@ pub(super) fn get_metadata_xcoff<'a>(path: &Path, data: &'a [u8]) -> Result<&'a
{
let offset = metadata_symbol.address() as usize;
if offset < 4 {
- return Err(format!("Invalid metadata symbol offset: {}", offset));
+ return Err(format!("Invalid metadata symbol offset: {offset}"));
}
// The offset specifies the location of rustc metadata in the comment section.
// The metadata is preceded by a 4-byte length field.
let len = u32::from_be_bytes(info_data[(offset - 4)..offset].try_into().unwrap()) as usize;
if offset + len > (info_data.len() as usize) {
return Err(format!(
- "Metadata at offset {} with size {} is beyond .info section",
- offset, len
+ "Metadata at offset {offset} with size {len} is beyond .info section"
));
}
return Ok(&info_data[offset..(offset + len)]);
} else {
- return Err(format!("Unable to find symbol {}", AIX_METADATA_SYMBOL_NAME));
+ return Err(format!("Unable to find symbol {AIX_METADATA_SYMBOL_NAME}"));
};
}
@@ -194,8 +191,8 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
}
"x86" => Architecture::I386,
"s390x" => Architecture::S390x,
- "mips" => Architecture::Mips,
- "mips64" => Architecture::Mips64,
+ "mips" | "mips32r6" => Architecture::Mips,
+ "mips64" | "mips64r6" => Architecture::Mips64,
"x86_64" => {
if sess.target.pointer_width == 32 {
Architecture::X86_64_X32
@@ -213,6 +210,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
"hexagon" => Architecture::Hexagon,
"bpf" => Architecture::Bpf,
"loongarch64" => Architecture::LoongArch64,
+ "csky" => Architecture::Csky,
// Unsupported architecture.
_ => return None,
};
@@ -243,8 +241,16 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
s if s.contains("r6") => elf::EF_MIPS_ARCH_32R6,
_ => elf::EF_MIPS_ARCH_32R2,
};
- // The only ABI LLVM supports for 32-bit MIPS CPUs is o32.
- let mut e_flags = elf::EF_MIPS_CPIC | elf::EF_MIPS_ABI_O32 | arch;
+
+ let mut e_flags = elf::EF_MIPS_CPIC | arch;
+
+ // If the ABI is explicitly given, use it; otherwise default to O32.
+ match sess.target.options.llvm_abiname.to_lowercase().as_str() {
+ "n32" => e_flags |= elf::EF_MIPS_ABI2,
+ "o32" => e_flags |= elf::EF_MIPS_ABI_O32,
+ _ => e_flags |= elf::EF_MIPS_ABI_O32,
+ };
+
if sess.target.options.relocation_model != RelocModel::Static {
e_flags |= elf::EF_MIPS_PIC;
}
@@ -267,35 +273,38 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
Architecture::Riscv32 | Architecture::Riscv64 => {
// Source: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/079772828bd10933d34121117a222b4cc0ee2200/riscv-elf.adoc
let mut e_flags: u32 = 0x0;
- let features = &sess.target.options.features;
+
// Check if compressed is enabled
- if features.contains("+c") {
+ // `unstable_target_features` is used here because "c" is gated behind riscv_target_feature.
+ if sess.unstable_target_features.contains(&sym::c) {
e_flags |= elf::EF_RISCV_RVC;
}
- // Select the appropriate floating-point ABI
- if features.contains("+d") {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE;
- } else if features.contains("+f") {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE;
- } else {
- e_flags |= elf::EF_RISCV_FLOAT_ABI_SOFT;
+ // Set the appropriate flag based on ABI
+ // This needs to match LLVM `RISCVELFStreamer.cpp`
+ match &*sess.target.llvm_abiname {
+ "" | "ilp32" | "lp64" => (),
+ "ilp32f" | "lp64f" => e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE,
+ "ilp32d" | "lp64d" => e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE,
+ "ilp32e" => e_flags |= elf::EF_RISCV_RVE,
+ _ => bug!("unknown RISC-V ABI name"),
}
+
e_flags
}
Architecture::LoongArch64 => {
// Source: https://github.com/loongson/la-abi-specs/blob/release/laelf.adoc#e_flags-identifies-abi-type-and-version
let mut e_flags: u32 = elf::EF_LARCH_OBJABI_V1;
- let features = &sess.target.options.features;
- // Select the appropriate floating-point ABI
- if features.contains("+d") {
- e_flags |= elf::EF_LARCH_ABI_DOUBLE_FLOAT;
- } else if features.contains("+f") {
- e_flags |= elf::EF_LARCH_ABI_SINGLE_FLOAT;
- } else {
- e_flags |= elf::EF_LARCH_ABI_SOFT_FLOAT;
+ // Set the appropriate flag based on ABI
+ // This needs to match LLVM `LoongArchELFStreamer.cpp`
+ match &*sess.target.llvm_abiname {
+ "ilp32s" | "lp64s" => e_flags |= elf::EF_LARCH_ABI_SOFT_FLOAT,
+ "ilp32f" | "lp64f" => e_flags |= elf::EF_LARCH_ABI_SINGLE_FLOAT,
+ "ilp32d" | "lp64d" => e_flags |= elf::EF_LARCH_ABI_DOUBLE_FLOAT,
+ _ => bug!("unknown RISC-V ABI name"),
}
+
e_flags
}
Architecture::Avr => {
@@ -303,6 +312,13 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
// the appropriate EF_AVR_ARCH flag.
ef_avr_arch(&sess.target.options.cpu)
}
+ Architecture::Csky => {
+ let e_flags = match sess.target.options.abi.as_ref() {
+ "abiv2" => elf::EF_CSKY_ABIV2,
+ _ => elf::EF_CSKY_ABIV1,
+ };
+ e_flags
+ }
_ => 0,
};
// adapted from LLVM's `MCELFObjectTargetWriter::getOSABI`
@@ -474,19 +490,15 @@ pub fn create_compressed_metadata_file(
metadata: &EncodedMetadata,
symbol_name: &str,
) -> Vec<u8> {
- let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
- // Our length will be backfilled once we're done writing
- compressed.write_all(&[0; 4]).unwrap();
- FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
- let meta_len = rustc_metadata::METADATA_HEADER.len();
- let data_len = (compressed.len() - meta_len - 4) as u32;
- compressed[meta_len..meta_len + 4].copy_from_slice(&data_len.to_be_bytes());
+ let mut packed_metadata = rustc_metadata::METADATA_HEADER.to_vec();
+ packed_metadata.write_all(&(metadata.raw_data().len() as u32).to_be_bytes()).unwrap();
+ packed_metadata.extend(metadata.raw_data());
let Some(mut file) = create_object_file(sess) else {
- return compressed.to_vec();
+ return packed_metadata.to_vec();
};
if file.format() == BinaryFormat::Xcoff {
- return create_compressed_metadata_file_for_xcoff(file, &compressed, symbol_name);
+ return create_compressed_metadata_file_for_xcoff(file, &packed_metadata, symbol_name);
}
let section = file.add_section(
file.segment_name(StandardSegment::Data).to_vec(),
@@ -500,14 +512,14 @@ pub fn create_compressed_metadata_file(
}
_ => {}
};
- let offset = file.append_section_data(section, &compressed, 1);
+ let offset = file.append_section_data(section, &packed_metadata, 1);
// For MachO and probably PE this is necessary to prevent the linker from throwing away the
// .rustc section. For ELF this isn't necessary, but it also does no harm.
file.add_symbol(Symbol {
name: symbol_name.as_bytes().to_vec(),
value: offset,
- size: compressed.len() as u64,
+ size: packed_metadata.len() as u64,
kind: SymbolKind::Data,
scope: SymbolScope::Dynamic,
weak: false,
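
The change to `create_compressed_metadata_file` drops the snappy `FrameEncoder` in favour of storing the metadata uncompressed: the header, then a big-endian u32 length, then the raw bytes, so the length no longer needs to be backfilled after encoding. A minimal sketch of the resulting layout, where `HEADER` is a hypothetical stand-in for `rustc_metadata::METADATA_HEADER`:

const HEADER: &[u8] = b"rust-metadata-placeholder"; // illustrative value only

fn pack(raw: &[u8]) -> Vec<u8> {
    let mut out = HEADER.to_vec();
    out.extend_from_slice(&(raw.len() as u32).to_be_bytes()); // big-endian length prefix
    out.extend_from_slice(raw);
    out
}

fn unpack(packed: &[u8]) -> Option<&[u8]> {
    let rest = packed.strip_prefix(HEADER)?;
    if rest.len() < 4 {
        return None;
    }
    let (len, rest) = rest.split_at(4);
    let len = u32::from_be_bytes(len.try_into().ok()?) as usize;
    rest.get(..len)
}
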
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
index 0b5656c9a..ebf04e7a3 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -1,6 +1,7 @@
use pathdiff::diff_paths;
use rustc_data_structures::fx::FxHashSet;
use std::env;
+use std::ffi::OsString;
use std::fs;
use std::path::{Path, PathBuf};
@@ -12,7 +13,7 @@ pub struct RPathConfig<'a> {
pub linker_is_gnu: bool,
}
-pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
+pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<OsString> {
// No rpath on windows
if !config.has_rpath {
return Vec::new();
@@ -21,36 +22,38 @@ pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
debug!("preparing the RPATH!");
let rpaths = get_rpaths(config);
- let mut flags = rpaths_to_flags(&rpaths);
+ let mut flags = rpaths_to_flags(rpaths);
if config.linker_is_gnu {
// Use DT_RUNPATH instead of DT_RPATH if available
- flags.push("-Wl,--enable-new-dtags".to_owned());
+ flags.push("-Wl,--enable-new-dtags".into());
// Set DF_ORIGIN for substitute $ORIGIN
- flags.push("-Wl,-z,origin".to_owned());
+ flags.push("-Wl,-z,origin".into());
}
flags
}
-fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
+fn rpaths_to_flags(rpaths: Vec<OsString>) -> Vec<OsString> {
let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
for rpath in rpaths {
- if rpath.contains(',') {
+ if rpath.to_string_lossy().contains(',') {
ret.push("-Wl,-rpath".into());
ret.push("-Xlinker".into());
- ret.push(rpath.clone());
+ ret.push(rpath);
} else {
- ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
+ let mut single_arg = OsString::from("-Wl,-rpath,");
+ single_arg.push(rpath);
+ ret.push(single_arg);
}
}
ret
}
-fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
+fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<OsString> {
debug!("output: {:?}", config.out_filename.display());
debug!("libs:");
for libpath in config.libs {
@@ -64,18 +67,18 @@ fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
debug!("rpaths:");
for rpath in &rpaths {
- debug!(" {}", rpath);
+ debug!(" {:?}", rpath);
}
// Remove duplicates
minimize_rpaths(&rpaths)
}
-fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<String> {
+fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<OsString> {
config.libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
}
-fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String {
+fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> OsString {
// Mac doesn't appear to support $ORIGIN
let prefix = if config.is_like_osx { "@loader_path" } else { "$ORIGIN" };
@@ -86,9 +89,12 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> Str
output.pop(); // strip filename
let output = fs::canonicalize(&output).unwrap_or(output);
let relative = path_relative_from(&lib, &output)
- .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib));
- // FIXME (#9639): This needs to handle non-utf8 paths
- format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path"))
+ .unwrap_or_else(|| panic!("couldn't create relative path from {output:?} to {lib:?}"));
+
+ let mut rpath = OsString::from(prefix);
+ rpath.push("/");
+ rpath.push(relative);
+ rpath
}
// This routine is adapted from the *old* Path's `path_relative_from`
@@ -99,7 +105,7 @@ fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
diff_paths(path, base)
}
-fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
+fn minimize_rpaths(rpaths: &[OsString]) -> Vec<OsString> {
let mut set = FxHashSet::default();
let mut minimized = Vec::new();
for rpath in rpaths {
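
The comma special-case in `rpaths_to_flags` above exists because compiler drivers split `-Wl,`-prefixed options on commas before handing them to the linker, so a path that itself contains a comma must be forwarded verbatim via `-Xlinker`. A small illustration of the splitting behaviour:

fn main() {
    // A gcc-style driver turns "-Wl,a,b,c" into the linker arguments ["a", "b", "c"]:
    let arg = "-Wl,-rpath,/odd,dir";
    let forwarded: Vec<&str> = arg.trim_start_matches("-Wl,").split(',').collect();
    assert_eq!(forwarded, ["-rpath", "/odd", "dir"]); // the rpath got split apart
}
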
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
index 604f19144..ac2e54072 100644
--- a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
+++ b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
@@ -1,32 +1,33 @@
use super::RPathConfig;
use super::{get_rpath_relative_to_output, minimize_rpaths, rpaths_to_flags};
+use std::ffi::OsString;
use std::path::{Path, PathBuf};
#[test]
fn test_rpaths_to_flags() {
- let flags = rpaths_to_flags(&["path1".to_string(), "path2".to_string()]);
+ let flags = rpaths_to_flags(vec!["path1".into(), "path2".into()]);
assert_eq!(flags, ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]);
}
#[test]
fn test_minimize1() {
- let res = minimize_rpaths(&["rpath1".to_string(), "rpath2".to_string(), "rpath1".to_string()]);
+ let res = minimize_rpaths(&["rpath1".into(), "rpath2".into(), "rpath1".into()]);
assert!(res == ["rpath1", "rpath2",]);
}
#[test]
fn test_minimize2() {
let res = minimize_rpaths(&[
- "1a".to_string(),
- "2".to_string(),
- "2".to_string(),
- "1a".to_string(),
- "4a".to_string(),
- "1a".to_string(),
- "2".to_string(),
- "3".to_string(),
- "4a".to_string(),
- "3".to_string(),
+ "1a".into(),
+ "2".into(),
+ "2".into(),
+ "1a".into(),
+ "4a".into(),
+ "1a".into(),
+ "2".into(),
+ "3".into(),
+ "4a".into(),
+ "3".into(),
]);
assert!(res == ["1a", "2", "4a", "3",]);
}
@@ -58,15 +59,15 @@ fn test_rpath_relative() {
#[test]
fn test_xlinker() {
- let args = rpaths_to_flags(&["a/normal/path".to_string(), "a,comma,path".to_string()]);
+ let args = rpaths_to_flags(vec!["a/normal/path".into(), "a,comma,path".into()]);
assert_eq!(
args,
vec![
- "-Wl,-rpath,a/normal/path".to_string(),
- "-Wl,-rpath".to_string(),
- "-Xlinker".to_string(),
- "a,comma,path".to_string()
+ OsString::from("-Wl,-rpath,a/normal/path"),
+ OsString::from("-Wl,-rpath"),
+ OsString::from("-Xlinker"),
+ OsString::from("a,comma,path")
]
);
}
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index a8b6030ac..8fb2ccb7e 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -12,14 +12,14 @@ use rustc_middle::middle::exported_symbols::{
};
use rustc_middle::query::LocalCrate;
use rustc_middle::query::{ExternProviders, Providers};
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::Instance;
use rustc_middle::ty::{self, SymbolName, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_session::config::{CrateType, OomStrategy};
use rustc_target::spec::SanitizerSet;
pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
- crates_export_threshold(&tcx.sess.crate_types())
+ crates_export_threshold(tcx.crate_types())
}
fn crate_export_threshold(crate_type: CrateType) -> SymbolExportLevel {
@@ -233,15 +233,6 @@ fn exported_symbols_provider_local(
));
}
- symbols.push((
- ExportedSymbol::NoDefId(SymbolName::new(tcx, OomStrategy::SYMBOL)),
- SymbolExportInfo {
- level: SymbolExportLevel::Rust,
- kind: SymbolExportKind::Text,
- used: false,
- },
- ));
-
let exported_symbol =
ExportedSymbol::NoDefId(SymbolName::new(tcx, NO_ALLOC_SHIM_IS_UNSTABLE));
symbols.push((
@@ -299,8 +290,8 @@ fn exported_symbols_provider_local(
}));
}
- if tcx.sess.crate_types().contains(&CrateType::Dylib)
- || tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ if tcx.crate_types().contains(&CrateType::Dylib)
+ || tcx.crate_types().contains(&CrateType::ProcMacro)
{
let symbol_name = metadata_symbol_name(tcx);
let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
@@ -328,23 +319,23 @@ fn exported_symbols_provider_local(
let (_, cgus) = tcx.collect_and_partition_mono_items(());
- for (mono_item, &(linkage, visibility)) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
- if linkage != Linkage::External {
+ for (mono_item, data) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
+ if data.linkage != Linkage::External {
// We can only re-use things with external linkage, otherwise
// we'll get a linker error
continue;
}
- if need_visibility && visibility == Visibility::Hidden {
+ if need_visibility && data.visibility == Visibility::Hidden {
// If we potentially share things from Rust dylibs, they must
// not be hidden
continue;
}
match *mono_item {
- MonoItem::Fn(Instance { def: InstanceDef::Item(def), substs }) => {
- if substs.non_erasable_generics().next().is_some() {
- let symbol = ExportedSymbol::Generic(def, substs);
+ MonoItem::Fn(Instance { def: InstanceDef::Item(def), args }) => {
+ if args.non_erasable_generics().next().is_some() {
+ let symbol = ExportedSymbol::Generic(def, args);
symbols.push((
symbol,
SymbolExportInfo {
@@ -355,10 +346,10 @@ fn exported_symbols_provider_local(
));
}
}
- MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), substs }) => {
+ MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), args }) => {
// A little sanity-check
debug_assert_eq!(
- substs.non_erasable_generics().next(),
+ args.non_erasable_generics().next(),
Some(GenericArgKind::Type(ty))
);
symbols.push((
@@ -386,7 +377,7 @@ fn exported_symbols_provider_local(
fn upstream_monomorphizations_provider(
tcx: TyCtxt<'_>,
(): (),
-) -> DefIdMap<FxHashMap<SubstsRef<'_>, CrateNum>> {
+) -> DefIdMap<FxHashMap<GenericArgsRef<'_>, CrateNum>> {
let cnums = tcx.crates(());
let mut instances: DefIdMap<FxHashMap<_, _>> = Default::default();
@@ -395,11 +386,11 @@ fn upstream_monomorphizations_provider(
for &cnum in cnums.iter() {
for (exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
- let (def_id, substs) = match *exported_symbol {
- ExportedSymbol::Generic(def_id, substs) => (def_id, substs),
+ let (def_id, args) = match *exported_symbol {
+ ExportedSymbol::Generic(def_id, args) => (def_id, args),
ExportedSymbol::DropGlue(ty) => {
if let Some(drop_in_place_fn_def_id) = drop_in_place_fn_def_id {
- (drop_in_place_fn_def_id, tcx.mk_substs(&[ty.into()]))
+ (drop_in_place_fn_def_id, tcx.mk_args(&[ty.into()]))
} else {
// `drop_in_place` does not exist, don't try
// to use it.
@@ -414,9 +405,9 @@ fn upstream_monomorphizations_provider(
}
};
- let substs_map = instances.entry(def_id).or_default();
+ let args_map = instances.entry(def_id).or_default();
- match substs_map.entry(substs) {
+ match args_map.entry(args) {
Occupied(mut e) => {
// If there are multiple monomorphizations available,
// we select one deterministically.
@@ -438,17 +429,17 @@ fn upstream_monomorphizations_provider(
fn upstream_monomorphizations_for_provider(
tcx: TyCtxt<'_>,
def_id: DefId,
-) -> Option<&FxHashMap<SubstsRef<'_>, CrateNum>> {
+) -> Option<&FxHashMap<GenericArgsRef<'_>, CrateNum>> {
debug_assert!(!def_id.is_local());
tcx.upstream_monomorphizations(()).get(&def_id)
}
fn upstream_drop_glue_for_provider<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
) -> Option<CrateNum> {
if let Some(def_id) = tcx.lang_items().drop_in_place_fn() {
- tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&substs).cloned())
+ tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&args).cloned())
} else {
None
}
@@ -521,10 +512,10 @@ pub fn symbol_name_for_instance_in_crate<'tcx>(
instantiating_crate,
)
}
- ExportedSymbol::Generic(def_id, substs) => {
+ ExportedSymbol::Generic(def_id, args) => {
rustc_symbol_mangling::symbol_name_for_instance_in_crate(
tcx,
- Instance::new(def_id, substs),
+ Instance::new(def_id, args),
instantiating_crate,
)
}
@@ -533,7 +524,7 @@ pub fn symbol_name_for_instance_in_crate<'tcx>(
tcx,
ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
},
instantiating_crate,
)
@@ -580,7 +571,7 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
None
}
ExportedSymbol::NonGeneric(def_id) => Some(Instance::mono(tcx, def_id)),
- ExportedSymbol::Generic(def_id, substs) => Some(Instance::new(def_id, substs)),
+ ExportedSymbol::Generic(def_id, args) => Some(Instance::new(def_id, args)),
// DropGlue always use the Rust calling convention and thus follow the target's default
// symbol decoration scheme.
ExportedSymbol::DropGlue(..) => None,
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index ececa29b2..f485af00b 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -123,7 +123,7 @@ pub struct ModuleConfig {
impl ModuleConfig {
fn new(
kind: ModuleKind,
- sess: &Session,
+ tcx: TyCtxt<'_>,
no_builtins: bool,
is_compiler_builtins: bool,
) -> ModuleConfig {
@@ -135,6 +135,7 @@ impl ModuleConfig {
};
}
+ let sess = tcx.sess;
let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
let save_temps = sess.opts.cg.save_temps;
@@ -166,7 +167,7 @@ impl ModuleConfig {
// `#![no_builtins]` is assumed to not participate in LTO and
// instead goes on to generate object code.
EmitObj::Bitcode
- } else if need_bitcode_in_object(sess) {
+ } else if need_bitcode_in_object(tcx) {
EmitObj::ObjectCode(BitcodeSection::Full)
} else {
EmitObj::ObjectCode(BitcodeSection::None)
@@ -349,8 +350,6 @@ pub struct CodegenContext<B: WriteBackendMethods> {
/// Directory into which should the LLVM optimization remarks be written.
/// If `None`, they will be written to stderr.
pub remark_dir: Option<PathBuf>,
- /// Worker thread number
- pub worker: usize,
/// The incremental compilation session directory, or None if we are not
/// compiling incrementally
pub incr_comp_session_dir: Option<PathBuf>,
@@ -362,7 +361,7 @@ pub struct CodegenContext<B: WriteBackendMethods> {
impl<B: WriteBackendMethods> CodegenContext<B> {
pub fn create_diag_handler(&self) -> Handler {
- Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
+ Handler::with_emitter(Box::new(self.diag_emitter.clone()))
}
pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
@@ -376,38 +375,39 @@ impl<B: WriteBackendMethods> CodegenContext<B> {
fn generate_lto_work<B: ExtraBackendMethods>(
cgcx: &CodegenContext<B>,
- needs_fat_lto: Vec<FatLTOInput<B>>,
+ needs_fat_lto: Vec<FatLtoInput<B>>,
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
) -> Vec<(WorkItem<B>, u64)> {
let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
- let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
+ if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());
- let lto_module =
+ let module =
B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
- (vec![lto_module], vec![])
+ // We are adding a single work item, so the cost doesn't matter.
+ vec![(WorkItem::LTO(module), 0)]
} else {
assert!(needs_fat_lto.is_empty());
- B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
- };
-
- lto_modules
- .into_iter()
- .map(|module| {
- let cost = module.cost();
- (WorkItem::LTO(module), cost)
- })
- .chain(copy_jobs.into_iter().map(|wp| {
- (
- WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
- name: wp.cgu_name.clone(),
- source: wp,
- }),
- 0,
- )
- }))
- .collect()
+ let (lto_modules, copy_jobs) = B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules)
+ .unwrap_or_else(|e| e.raise());
+ lto_modules
+ .into_iter()
+ .map(|module| {
+ let cost = module.cost();
+ (WorkItem::LTO(module), cost)
+ })
+ .chain(copy_jobs.into_iter().map(|wp| {
+ (
+ WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
+ name: wp.cgu_name.clone(),
+ source: wp,
+ }),
+ 0, // copying is very cheap
+ )
+ }))
+ .collect()
+ }
}
pub struct CompiledModules {
@@ -415,9 +415,10 @@ pub struct CompiledModules {
pub allocator_module: Option<CompiledModule>,
}
-fn need_bitcode_in_object(sess: &Session) -> bool {
+fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
+ let sess = tcx.sess;
let requested_for_rlib = sess.opts.cg.embed_bitcode
- && sess.crate_types().contains(&CrateType::Rlib)
+ && tcx.crate_types().contains(&CrateType::Rlib)
&& sess.opts.output_types.contains_key(&OutputType::Exe);
let forced_by_target = sess.target.forces_embed_bitcode;
requested_for_rlib || forced_by_target
@@ -451,11 +452,11 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
let crate_info = CrateInfo::new(tcx, target_cpu);
let regular_config =
- ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Regular, tcx, no_builtins, is_compiler_builtins);
let metadata_config =
- ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Metadata, tcx, no_builtins, is_compiler_builtins);
let allocator_config =
- ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
+ ModuleConfig::new(ModuleKind::Allocator, tcx, no_builtins, is_compiler_builtins);
let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
let (codegen_worker_send, codegen_worker_receive) = channel();
@@ -709,7 +710,7 @@ impl<B: WriteBackendMethods> WorkItem<B> {
fn desc(short: &str, _long: &str, name: &str) -> String {
// The short label is three bytes, and is followed by a space. That
// leaves 11 bytes for the CGU name. How we obtain those 11 bytes
- // depends on the the CGU name form.
+ // depends on the CGU name form.
//
// - Non-incremental, e.g. `regex.f10ba03eb5ec7975-cgu.0`: the part
// before the `-cgu.0` is the same for every CGU, so use the
@@ -742,22 +743,32 @@ impl<B: WriteBackendMethods> WorkItem<B> {
}
match self {
- WorkItem::Optimize(m) => desc("opt", "optimize module {}", &m.name),
- WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for {}", &m.name),
- WorkItem::LTO(m) => desc("lto", "LTO module {}", m.name()),
+ WorkItem::Optimize(m) => desc("opt", "optimize module", &m.name),
+ WorkItem::CopyPostLtoArtifacts(m) => desc("cpy", "copy LTO artifacts for", &m.name),
+ WorkItem::LTO(m) => desc("lto", "LTO module", m.name()),
}
}
}
/// A result produced by the backend.
pub(crate) enum WorkItemResult<B: WriteBackendMethods> {
- Compiled(CompiledModule),
+ /// The backend has finished compiling a CGU, nothing more required.
+ Finished(CompiledModule),
+
+ /// The backend has finished compiling a CGU, which now needs linking
+ /// because `-Zcombine-cgu` was specified.
NeedsLink(ModuleCodegen<B::Module>),
- NeedsFatLTO(FatLTOInput<B>),
- NeedsThinLTO(String, B::ThinBuffer),
+
+ /// The backend has finished compiling a CGU, which now needs to go through
+ /// fat LTO.
+ NeedsFatLto(FatLtoInput<B>),
+
+ /// The backend has finished compiling a CGU, which now needs to go through
+ /// thin LTO.
+ NeedsThinLto(String, B::ThinBuffer),
}
-pub enum FatLTOInput<B: WriteBackendMethods> {
+pub enum FatLtoInput<B: WriteBackendMethods> {
Serialized { name: String, buffer: B::ModuleBuffer },
InMemory(ModuleCodegen<B::Module>),
}
@@ -846,7 +857,7 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
}
- Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))
+ Ok(WorkItemResult::NeedsThinLto(name, thin_buffer))
}
ComputedLtoType::Fat => match bitcode {
Some(path) => {
@@ -854,9 +865,9 @@ fn execute_optimize_work_item<B: ExtraBackendMethods>(
fs::write(&path, buffer.data()).unwrap_or_else(|e| {
panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
});
- Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }))
+ Ok(WorkItemResult::NeedsFatLto(FatLtoInput::Serialized { name, buffer }))
}
- None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))),
+ None => Ok(WorkItemResult::NeedsFatLto(FatLtoInput::InMemory(module))),
},
}
}
@@ -906,7 +917,7 @@ fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
});
- WorkItemResult::Compiled(CompiledModule {
+ WorkItemResult::Finished(CompiledModule {
name: module.name,
kind: ModuleKind::Regular,
object,
@@ -936,7 +947,7 @@ fn finish_intra_module_work<B: ExtraBackendMethods>(
|| module.kind == ModuleKind::Allocator
{
let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
- Ok(WorkItemResult::Compiled(module))
+ Ok(WorkItemResult::Finished(module))
} else {
Ok(WorkItemResult::NeedsLink(module))
}
@@ -987,10 +998,15 @@ struct Diagnostic {
}
#[derive(PartialEq, Clone, Copy, Debug)]
-enum MainThreadWorkerState {
+enum MainThreadState {
+ /// Doing nothing.
Idle,
+
+ /// Doing codegen, i.e. MIR-to-LLVM-IR conversion.
Codegenning,
- LLVMing,
+
+ /// Idle, but lending the compiler process's Token to an LLVM thread so it can do useful work.
+ Lending,
}
fn start_executing_work<B: ExtraBackendMethods>(
@@ -1078,7 +1094,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
};
let cgcx = CodegenContext::<B> {
- crate_types: sess.crate_types().to_vec(),
+ crate_types: tcx.crate_types().to_vec(),
each_linked_rlib_for_lto,
lto: sess.lto(),
fewer_names: sess.fewer_names(),
@@ -1089,7 +1105,6 @@ fn start_executing_work<B: ExtraBackendMethods>(
exported_symbols,
remark: sess.opts.cg.remark.clone(),
remark_dir,
- worker: 0,
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
coordinator_send,
@@ -1242,7 +1257,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Each LLVM module is automatically sent back to the coordinator for LTO if
// necessary. There's already optimizations in place to avoid sending work
// back to the coordinator if LTO isn't requested.
- return B::spawn_thread(cgcx.time_trace, move || {
+ return B::spawn_named_thread(cgcx.time_trace, "coordinator".to_string(), move || {
let mut worker_id_counter = 0;
let mut free_worker_ids = Vec::new();
let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
@@ -1285,10 +1300,19 @@ fn start_executing_work<B: ExtraBackendMethods>(
// the implicit Token the compiler process owns no matter what.
let mut tokens = Vec::new();
- let mut main_thread_worker_state = MainThreadWorkerState::Idle;
- let mut running = 0;
+ let mut main_thread_state = MainThreadState::Idle;
+
+ // How many LLVM worker threads are running while holding a Token. This
+ // *excludes* any that the main thread is lending a Token to.
+ let mut running_with_own_token = 0;
+
+ // How many LLVM worker threads are running in total. This *includes*
+ // any that the main thread is lending a Token to.
+ let running_with_any_token = |main_thread_state, running_with_own_token| {
+ running_with_own_token
+ + if main_thread_state == MainThreadState::Lending { 1 } else { 0 }
+ };
- let prof = &cgcx.prof;
let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
// Run the message loop while there's still anything that needs message
@@ -1296,66 +1320,62 @@ fn start_executing_work<B: ExtraBackendMethods>(
// wait for all existing work to finish, so many of the conditions here
// only apply if codegen hasn't been aborted as they represent pending
// work to be done.
- while codegen_state == Ongoing
- || running > 0
- || main_thread_worker_state == MainThreadWorkerState::LLVMing
- || (codegen_state == Completed
- && !(work_items.is_empty()
- && needs_fat_lto.is_empty()
- && needs_thin_lto.is_empty()
- && lto_import_only_modules.is_empty()
- && main_thread_worker_state == MainThreadWorkerState::Idle))
- {
+ loop {
// While there are still CGUs to be codegened, the coordinator has
// to decide how to utilize the compiler processes implicit Token:
// For codegenning more CGU or for running them through LLVM.
if codegen_state == Ongoing {
- if main_thread_worker_state == MainThreadWorkerState::Idle {
+ if main_thread_state == MainThreadState::Idle {
// Compute the number of workers that will be running once we've taken as many
// items from the work queue as we can, plus one for the main thread. It's not
- // critically important that we use this instead of just `running`, but it
- // prevents the `queue_full_enough` heuristic from fluctuating just because a
- // worker finished up and we decreased the `running` count, even though we're
- // just going to increase it right after this when we put a new worker to work.
- let extra_tokens = tokens.len().checked_sub(running).unwrap();
+ // critically important that we use this instead of just
+ // `running_with_own_token`, but it prevents the `queue_full_enough` heuristic
+ // from fluctuating just because a worker finished up and we decreased the
+ // `running_with_own_token` count, even though we're just going to increase it
+ // right after this when we put a new worker to work.
+ let extra_tokens = tokens.len().checked_sub(running_with_own_token).unwrap();
let additional_running = std::cmp::min(extra_tokens, work_items.len());
- let anticipated_running = running + additional_running + 1;
+ let anticipated_running = running_with_own_token + additional_running + 1;
if !queue_full_enough(work_items.len(), anticipated_running) {
// The queue is not full enough, process more codegen units:
if codegen_worker_send.send(CguMessage).is_err() {
panic!("Could not send CguMessage to main thread")
}
- main_thread_worker_state = MainThreadWorkerState::Codegenning;
+ main_thread_state = MainThreadState::Codegenning;
} else {
// The queue is full enough to not let the worker
// threads starve. Use the implicit Token to do some
// LLVM work too.
let (item, _) =
work_items.pop().expect("queue empty - queue_full_enough() broken?");
- let cgcx = CodegenContext {
- worker: get_worker_id(&mut free_worker_ids),
- ..cgcx.clone()
- };
- maybe_start_llvm_timer(
- prof,
- cgcx.config(item.module_kind()),
+ main_thread_state = MainThreadState::Lending;
+ spawn_work(
+ &cgcx,
&mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
);
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
- spawn_work(cgcx, item);
}
}
} else if codegen_state == Completed {
- // If we've finished everything related to normal codegen
- // then it must be the case that we've got some LTO work to do.
- // Perform the serial work here of figuring out what we're
- // going to LTO and then push a bunch of work items onto our
- // queue to do LTO
- if work_items.is_empty()
- && running == 0
- && main_thread_worker_state == MainThreadWorkerState::Idle
+ if running_with_any_token(main_thread_state, running_with_own_token) == 0
+ && work_items.is_empty()
{
+ // All codegen work is done. Do we have LTO work to do?
+ if needs_fat_lto.is_empty()
+ && needs_thin_lto.is_empty()
+ && lto_import_only_modules.is_empty()
+ {
+ // Nothing more to do!
+ break;
+ }
+
+ // We have LTO work to do. Perform the serial work here of
+ // figuring out what we're going to LTO and then push a
+ // bunch of work items onto our queue to do LTO. This all
+ // happens on the coordinator thread but it's very quick so
+ // we don't worry about tokens.
assert!(!started_lto);
started_lto = true;
@@ -1379,20 +1399,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
// In this branch, we know that everything has been codegened,
// so it's just a matter of determining whether the implicit
// Token is free to use for LLVM work.
- match main_thread_worker_state {
- MainThreadWorkerState::Idle => {
+ match main_thread_state {
+ MainThreadState::Idle => {
if let Some((item, _)) = work_items.pop() {
- let cgcx = CodegenContext {
- worker: get_worker_id(&mut free_worker_ids),
- ..cgcx.clone()
- };
- maybe_start_llvm_timer(
- prof,
- cgcx.config(item.module_kind()),
+ main_thread_state = MainThreadState::Lending;
+ spawn_work(
+ &cgcx,
&mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
);
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
- spawn_work(cgcx, item);
} else {
// There is no unstarted work, so let the main thread
// take over for a running worker. Otherwise the
@@ -1400,16 +1416,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
// We reduce the `running` counter by one. The
// `tokens.truncate()` below will take care of
// giving the Token back.
- debug_assert!(running > 0);
- running -= 1;
- main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ debug_assert!(running_with_own_token > 0);
+ running_with_own_token -= 1;
+ main_thread_state = MainThreadState::Lending;
}
}
- MainThreadWorkerState::Codegenning => bug!(
+ MainThreadState::Codegenning => bug!(
"codegen worker should not be codegenning after \
codegen was already completed"
),
- MainThreadWorkerState::LLVMing => {
+ MainThreadState::Lending => {
// Already making good use of that token
}
}
@@ -1417,35 +1433,39 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Don't queue up any more work if codegen was aborted, we're
// just waiting for our existing children to finish.
assert!(codegen_state == Aborted);
+ if running_with_any_token(main_thread_state, running_with_own_token) == 0 {
+ break;
+ }
}
// Spin up what work we can, only doing this while we've got available
// parallelism slots and work left to spawn.
- while codegen_state != Aborted && !work_items.is_empty() && running < tokens.len() {
- let (item, _) = work_items.pop().unwrap();
-
- maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
-
- let cgcx =
- CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
-
- spawn_work(cgcx, item);
- running += 1;
+ if codegen_state != Aborted {
+ while !work_items.is_empty() && running_with_own_token < tokens.len() {
+ let (item, _) = work_items.pop().unwrap();
+ spawn_work(
+ &cgcx,
+ &mut llvm_start_time,
+ get_worker_id(&mut free_worker_ids),
+ item,
+ );
+ running_with_own_token += 1;
+ }
}
- // Relinquish accidentally acquired extra tokens
- tokens.truncate(running);
+ // Relinquish accidentally acquired extra tokens.
+ tokens.truncate(running_with_own_token);
// If a thread exits successfully then we drop a token associated
- // with that worker and update our `running` count. We may later
- // re-acquire a token to continue running more work. We may also not
- // actually drop a token here if the worker was running with an
- // "ephemeral token"
+ // with that worker and update our `running_with_own_token` count.
+ // We may later re-acquire a token to continue running more work.
+ // We may also not actually drop a token here if the worker was
+ // running with an "ephemeral token".
let mut free_worker = |worker_id| {
- if main_thread_worker_state == MainThreadWorkerState::LLVMing {
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ if main_thread_state == MainThreadState::Lending {
+ main_thread_state = MainThreadState::Idle;
} else {
- running -= 1;
+ running_with_own_token -= 1;
}
free_worker_ids.push(worker_id);
@@ -1461,17 +1481,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
Ok(token) => {
tokens.push(token);
- if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ if main_thread_state == MainThreadState::Lending {
// If the main thread token is used for LLVM work
// at the moment, we turn that thread into a regular
// LLVM worker thread, so the main thread is free
// to react to codegen demand.
- main_thread_worker_state = MainThreadWorkerState::Idle;
- running += 1;
+ main_thread_state = MainThreadState::Idle;
+ running_with_own_token += 1;
}
}
Err(e) => {
- let msg = &format!("failed to acquire jobserver token: {}", e);
+ let msg = &format!("failed to acquire jobserver token: {e}");
shared_emitter.fatal(msg);
codegen_state = Aborted;
}
@@ -1496,16 +1516,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
if !cgcx.opts.unstable_opts.no_parallel_llvm {
helper.request_token();
}
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
+ main_thread_state = MainThreadState::Idle;
}
Message::CodegenComplete => {
if codegen_state != Aborted {
codegen_state = Completed;
}
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
+ main_thread_state = MainThreadState::Idle;
}
// If codegen is aborted that means translation was aborted due
@@ -1513,7 +1533,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
// to exit as soon as possible, but we want to make sure all
// existing work has finished. Flag codegen as being done, and
// then conditions above will ensure no more work is spawned but
- // we'll keep executing this loop until `running` hits 0.
+ // we'll keep executing this loop until `running_with_own_token`
+ // hits 0.
Message::CodegenAborted => {
codegen_state = Aborted;
}
@@ -1522,9 +1543,10 @@ fn start_executing_work<B: ExtraBackendMethods>(
free_worker(worker_id);
match result {
- Ok(WorkItemResult::Compiled(compiled_module)) => {
+ Ok(WorkItemResult::Finished(compiled_module)) => {
match compiled_module.kind {
ModuleKind::Regular => {
+ assert!(needs_link.is_empty());
compiled_modules.push(compiled_module);
}
ModuleKind::Allocator => {
@@ -1535,14 +1557,17 @@ fn start_executing_work<B: ExtraBackendMethods>(
}
}
Ok(WorkItemResult::NeedsLink(module)) => {
+ assert!(compiled_modules.is_empty());
needs_link.push(module);
}
- Ok(WorkItemResult::NeedsFatLTO(fat_lto_input)) => {
+ Ok(WorkItemResult::NeedsFatLto(fat_lto_input)) => {
assert!(!started_lto);
+ assert!(needs_thin_lto.is_empty());
needs_fat_lto.push(fat_lto_input);
}
- Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer)) => {
+ Ok(WorkItemResult::NeedsThinLto(name, thin_buffer)) => {
assert!(!started_lto);
+ assert!(needs_fat_lto.is_empty());
needs_thin_lto.push((name, thin_buffer));
}
Err(Some(WorkerFatalError)) => {
@@ -1560,9 +1585,9 @@ fn start_executing_work<B: ExtraBackendMethods>(
Message::AddImportOnlyModule { module_data, work_product } => {
assert!(!started_lto);
assert_eq!(codegen_state, Ongoing);
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ assert_eq!(main_thread_state, MainThreadState::Codegenning);
lto_import_only_modules.push((module_data, work_product));
- main_thread_worker_state = MainThreadWorkerState::Idle;
+ main_thread_state = MainThreadState::Idle;
}
}
}
@@ -1595,7 +1620,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
modules: compiled_modules,
allocator_module: compiled_allocator_module,
})
- });
+ })
+ .expect("failed to spawn coordinator thread");
// A heuristic that determines if we have enough LLVM WorkItems in the
// queue so that the main thread can do LLVM work instead of codegen
@@ -1653,23 +1679,24 @@ fn start_executing_work<B: ExtraBackendMethods>(
let quarter_of_workers = workers_running - 3 * workers_running / 4;
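+        // E.g. with 7 workers running: 7 - (3 * 7) / 4 = 2 with integer division,
+        // so the main thread only switches to LLVM work once at least 2 items are queued.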
items_in_queue > 0 && items_in_queue >= quarter_of_workers
}
-
- fn maybe_start_llvm_timer<'a>(
- prof: &'a SelfProfilerRef,
- config: &ModuleConfig,
- llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
- ) {
- if config.time_module && llvm_start_time.is_none() {
- *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes"));
- }
- }
}
/// `FatalError` is explicitly not `Send`.
#[must_use]
pub struct WorkerFatalError;
-fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
+fn spawn_work<'a, B: ExtraBackendMethods>(
+ cgcx: &'a CodegenContext<B>,
+ llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
+ worker_id: usize,
+ work: WorkItem<B>,
+) {
+ if cgcx.config(work.module_kind()).time_module && llvm_start_time.is_none() {
+ *llvm_start_time = Some(cgcx.prof.verbose_generic_activity("LLVM_passes"));
+ }
+
+ let cgcx = cgcx.clone();
+
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
// Set up a destructor which will fire off a message that we're done as
// we exit.
@@ -1692,11 +1719,8 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>
}
}
- let mut bomb = Bomb::<B> {
- coordinator_send: cgcx.coordinator_send.clone(),
- result: None,
- worker_id: cgcx.worker,
- };
+ let mut bomb =
+ Bomb::<B> { coordinator_send: cgcx.coordinator_send.clone(), result: None, worker_id };
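+    // (Note: the bomb is a drop guard; per the comment above, its destructor reports this
+    // worker's result, or the lack of one, back to the coordinator even on panic, so the
+    // coordinator never waits forever for a dead worker.)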
// Execute the work itself, and if it finishes successfully then flag
// ourselves as a success as well.
@@ -1728,7 +1752,7 @@ fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>
})
};
})
- .expect("failed to spawn thread");
+ .expect("failed to spawn work thread");
}
enum SharedEmitterMessage {
@@ -1945,6 +1969,10 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
self.backend.print_pass_timings()
}
+ if sess.print_llvm_stats() {
+ self.backend.print_statistics()
+ }
+
(
CodegenResults {
metadata: self.metadata,
@@ -1958,19 +1986,6 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
)
}
- pub fn submit_pre_codegened_module_to_llvm(
- &self,
- tcx: TyCtxt<'_>,
- module: ModuleCodegen<B::Module>,
- ) {
- self.wait_for_signal_to_codegen_item();
- self.check_for_errors(tcx.sess);
-
- // These are generally cheap and won't throw off scheduling.
- let cost = 0;
- submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost);
- }
-
pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
self.wait_for_signal_to_codegen_item();
self.check_for_errors(tcx.sess);
@@ -2036,8 +2051,8 @@ pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
})));
}
-pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
- format!("{}.{}", module_name, PRE_LTO_BC_EXT)
+fn pre_lto_bitcode_filename(module_name: &str) -> String {
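+    // Returns e.g. "my_crate-cgu.0.pre-lto.bc", assuming PRE_LTO_BC_EXT is "pre-lto.bc".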
+ format!("{module_name}.{PRE_LTO_BC_EXT}")
}
fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
@@ -2050,7 +2065,7 @@ fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
);
tcx.sess.target.is_like_windows &&
- tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
+ tcx.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin LTO is enabled.
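+    // E.g. for an exported `static FOO`, codegen emits a companion `__imp_FOO` pointer
+    // global so that MSVC-style dllimport references still resolve under static linking.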
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 9133601ec..aa003e4e8 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -38,6 +38,7 @@ use rustc_span::symbol::sym;
use rustc_span::Symbol;
use rustc_target::abi::{Align, FIRST_VARIANT};
+use std::cmp;
use std::collections::BTreeSet;
use std::time::{Duration, Instant};
@@ -164,50 +165,27 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
if let Some(entry_idx) = vptr_entry_idx {
- let ptr_ty = cx.type_i8p();
+ let ptr_ty = cx.type_ptr();
let ptr_align = cx.tcx().data_layout.pointer_align.abi;
- let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
- let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
let gep = bx.inbounds_gep(
ptr_ty,
- llvtable,
+ old_info,
&[bx.const_usize(u64::try_from(entry_idx).unwrap())],
);
let new_vptr = bx.load(ptr_ty, gep, ptr_align);
bx.nonnull_metadata(new_vptr);
// VTable loads are invariant.
bx.set_invariant_load(new_vptr);
- bx.pointercast(new_vptr, vtable_ptr_ty)
+ new_vptr
} else {
old_info
}
}
- (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
- let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
- cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
- }
+ (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
-// Returns the vtable pointer type of a `dyn` or `dyn*` type
-fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
- cx: &Cx,
- target: Ty<'tcx>,
- kind: ty::DynKind,
-) -> <Cx as BackendTypes>::Type {
- cx.scalar_pair_element_backend_type(
- cx.layout_of(match kind {
- // vtable is the second field of `*mut dyn Trait`
- ty::Dyn => Ty::new_mut_ptr(cx.tcx(), target),
- // vtable is the second field of `dyn* Trait`
- ty::DynStar => target,
- }),
- 1,
- true,
- )
-}
-
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
@@ -221,8 +199,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
- let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
- (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+ (src, unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
@@ -247,11 +224,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
assert_eq!(result, None);
result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
}
- let (lldata, llextra) = result.unwrap();
- let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
- let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+ result.unwrap()
}
_ => bug!("unsize_ptr: called on bad types"),
}
@@ -270,11 +243,9 @@ pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"destination type must be a dyn*"
);
- // FIXME(dyn-star): We can remove this when all supported LLVMs use opaque ptrs only.
- let unit_ptr = bx.cx().type_ptr_to(bx.cx().type_struct(&[], false));
let src = match bx.cx().type_kind(bx.cx().backend_type(src_ty_and_layout)) {
- TypeKind::Pointer => bx.pointercast(src, unit_ptr),
- TypeKind::Integer => bx.inttoptr(src, unit_ptr),
+ TypeKind::Pointer => src,
+ TypeKind::Integer => bx.inttoptr(src, bx.type_ptr()),
// FIXME(dyn-star): We probably have to do a bitcast first, then inttoptr.
kind => bug!("unexpected TypeKind for left-hand side of `dyn*` cast: {kind:?}"),
};
@@ -397,11 +368,6 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
if flags == MemFlags::empty()
&& let Some(bty) = bx.cx().scalar_copy_backend_type(layout)
{
- // I look forward to only supporting opaque pointers
- let pty = bx.type_ptr_to(bty);
- let src = bx.pointercast(src, pty);
- let dst = bx.pointercast(dst, pty);
-
let temp = bx.load(bty, src, src_align);
bx.store(temp, dst, dst_align);
} else {
@@ -455,7 +421,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
let llfty = if cx.sess().target.main_needs_argc_argv {
- cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+ cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
};
@@ -489,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx.insert_reference_to_gdb_debug_scripts_section_global();
let isize_ty = cx.type_isize();
- let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+ let ptr_ty = cx.type_ptr();
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
@@ -499,7 +465,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx.tcx(),
ty::ParamEnv::reveal_all(),
start_def_id,
- cx.tcx().mk_substs(&[main_ret_ty.into()]),
+ cx.tcx().mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap(),
@@ -508,12 +474,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let i8_ty = cx.type_i8();
let arg_sigpipe = bx.const_u8(sigpipe);
- let start_ty =
- cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+ let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, ptr_ty, i8_ty], isize_ty);
(start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
} else {
debug!("using user-defined start fn");
- let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ let start_ty = cx.type_func(&[isize_ty, ptr_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
@@ -540,7 +505,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
} else {
// The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
let arg_argc = bx.const_int(cx.type_int(), 0);
- let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+ let arg_argv = bx.const_null(cx.type_ptr());
(arg_argc, arg_argv)
}
}
@@ -663,9 +628,16 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
)
});
- ongoing_codegen.submit_pre_codegened_module_to_llvm(
- tcx,
+ ongoing_codegen.wait_for_signal_to_codegen_item();
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ // These modules are generally cheap and won't throw off scheduling.
+ let cost = 0;
+ submit_codegened_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator },
+ cost,
);
}
@@ -682,10 +654,10 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
// are large size variations, this can reduce memory usage significantly.
let codegen_units: Vec<_> = {
let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
- sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
+ sorted_cgus.sort_by_key(|cgu| cmp::Reverse(cgu.size_estimate()));
let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
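+        // E.g. size estimates [9, 7, 5, 3, 1] are scheduled as [9, 1, 7, 3, 5],
+        // alternating large and small CGUs.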
- second_half.iter().rev().interleave(first_half).copied().collect()
+ first_half.iter().interleave(second_half.iter().rev()).copied().collect()
};
// Calculate the CGU reuse
@@ -760,7 +732,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
module,
cost,
);
- false
}
CguReuse::PreLto => {
submit_pre_lto_module_to_llvm(
@@ -772,7 +743,6 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
source: cgu.previous_work_product(tcx),
},
);
- true
}
CguReuse::PostLto => {
submit_post_lto_module_to_llvm(
@@ -783,9 +753,8 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
source: cgu.previous_work_product(tcx),
},
);
- true
}
- };
+ }
}
ongoing_codegen.codegen_finished(tcx);
@@ -810,18 +779,13 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
impl CrateInfo {
pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
- let exported_symbols = tcx
- .sess
- .crate_types()
+ let crate_types = tcx.crate_types().to_vec();
+ let exported_symbols = crate_types
.iter()
.map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
.collect();
- let linked_symbols = tcx
- .sess
- .crate_types()
- .iter()
- .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
- .collect();
+ let linked_symbols =
+ crate_types.iter().map(|&c| (c, crate::back::linker::linked_symbols(tcx, c))).collect();
let local_crate_name = tcx.crate_name(LOCAL_CRATE);
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
let subsystem = attr::first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
@@ -860,6 +824,7 @@ impl CrateInfo {
let mut info = CrateInfo {
target_cpu,
+ crate_types,
exported_symbols,
linked_symbols,
local_crate_name,
@@ -947,7 +912,7 @@ impl CrateInfo {
});
}
- let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
+ let embed_visualizers = tcx.crate_types().iter().any(|&crate_type| match crate_type {
CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
// These are crate types for which we invoke the linker and can embed
// NatVis visualizers.
@@ -1044,7 +1009,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
match compute_per_cgu_lto_type(
&tcx.sess.lto(),
&tcx.sess.opts,
- &tcx.sess.crate_types(),
+ tcx.crate_types(),
ModuleKind::Regular,
) {
ComputedLtoType::No => CguReuse::PostLto,
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index d6c230127..f6936c80b 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -1,4 +1,4 @@
-use rustc_ast::{ast, MetaItemKind, NestedMetaItem};
+use rustc_ast::{ast, attr, MetaItemKind, NestedMetaItem};
use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
use rustc_errors::struct_span_err;
use rustc_hir as hir;
@@ -60,6 +60,14 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
}
+ // When `no_builtins` is applied at the crate level, we should add the
+ // `no-builtins` attribute to each function to ensure it takes effect in LTO.
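+    // E.g. under `#![no_builtins]`, LLVM must not rewrite a hand-rolled byte-copy loop
+    // into a `memcpy` libcall, and with this flag that now holds during LTO as well.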
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins);
+ if no_builtins {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_BUILTINS;
+ }
+
let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
let mut inline_span = None;
@@ -207,14 +215,19 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
}
sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
sym::track_caller => {
- if !tcx.is_closure(did.to_def_id())
+ let is_closure = tcx.is_closure(did.to_def_id());
+
+ if !is_closure
&& let Some(fn_sig) = fn_sig()
&& fn_sig.skip_binder().abi() != abi::Abi::Rust
{
struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
.emit();
}
- if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
+ if is_closure
+ && !tcx.features().closure_track_caller
+ && !attr.span.allows_unstable(sym::closure_track_caller)
+ {
feature_err(
&tcx.sess.parse_sess,
sym::closure_track_caller,
@@ -493,7 +506,22 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
});
// #73631: closures inherit `#[target_feature]` annotations
- if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) {
+ //
+ // If this closure is marked `#[inline(always)]`, simply skip adding `#[target_feature]`.
+ //
+ // At this point, `unsafe` has already been checked and `#[target_feature]` only affects codegen.
+ // Emitting both `#[inline(always)]` and `#[target_feature]` can potentially result in an
+ // ICE, because LLVM errors when the function fails to be inlined due to a target feature
+ // mismatch.
+ //
+ // Using `#[inline(always)]` implies that this closure will most likely be inlined into
+    // its parent function, which effectively inherits the features anyway. Boxing such a closure
+    // would compile it without the inherited target features, but that is probably a poor use of
+    // `#[inline(always)]` and easily avoided by not using the attribute.
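+    // E.g. a plain closure inside an `#[target_feature(enable = "avx2")]` fn inherits `avx2`;
+    // an `#[inline(always)]` closure is skipped so LLVM can still inline it at the call site.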
+ if tcx.features().target_feature_11
+ && tcx.is_closure(did.to_def_id())
+ && codegen_fn_attrs.inline != InlineAttr::Always
+ {
let owner_id = tcx.parent(did.to_def_id());
if tcx.def_kind(owner_id).has_codegen_attrs() {
codegen_fn_attrs
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index e91f7b86e..067c824ab 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -17,8 +17,8 @@ use rustc_hir::def_id::DefId;
use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
-use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, ExistentialProjection, ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use rustc_target::abi::Integer;
use smallvec::SmallVec;
@@ -77,7 +77,7 @@ fn push_debuginfo_type_name<'tcx>(
ty::Uint(uint_ty) => output.push_str(uint_ty.name_str()),
ty::Float(float_ty) => output.push_str(float_ty.name_str()),
ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
- ty::Adt(def, substs) => {
+ ty::Adt(def, args) => {
// `layout_for_cpp_like_fallback` will be `Some` if we want to use the fallback encoding.
let layout_for_cpp_like_fallback = if cpp_like_debuginfo && def.is_enum() {
match tcx.layout_of(ParamEnv::reveal_all().and(t)) {
@@ -106,14 +106,14 @@ fn push_debuginfo_type_name<'tcx>(
ty_and_layout,
&|output, visited| {
push_item_name(tcx, def.did(), true, output);
- push_generic_params_internal(tcx, substs, output, visited);
+ push_generic_params_internal(tcx, args, output, visited);
},
output,
visited,
);
} else {
push_item_name(tcx, def.did(), qualified, output);
- push_generic_params_internal(tcx, substs, output, visited);
+ push_generic_params_internal(tcx, args, output, visited);
}
}
ty::Tuple(component_types) => {
@@ -238,7 +238,7 @@ fn push_debuginfo_type_name<'tcx>(
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
push_item_name(tcx, principal.def_id, qualified, output);
let principal_has_generic_params =
- push_generic_params_internal(tcx, principal.substs, output, visited);
+ push_generic_params_internal(tcx, principal.args, output, visited);
let projection_bounds: SmallVec<[_; 4]> = trait_data
.projection_bounds()
@@ -393,7 +393,7 @@ fn push_debuginfo_type_name<'tcx>(
// processing
visited.remove(&t);
}
- ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ ty::Closure(def_id, args) | ty::Generator(def_id, args, ..) => {
// Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
// "{async_fn_env#0}<T1, T2, ...>", etc.
// In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
@@ -403,18 +403,18 @@ fn push_debuginfo_type_name<'tcx>(
msvc_enum_fallback(
ty_and_layout,
&|output, visited| {
- push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
+ push_closure_or_generator_name(tcx, def_id, args, true, output, visited);
},
output,
visited,
);
} else {
- push_closure_or_generator_name(tcx, def_id, substs, qualified, output, visited);
+ push_closure_or_generator_name(tcx, def_id, args, qualified, output, visited);
}
}
// Type parameters from polymorphized functions.
ty::Param(_) => {
- write!(output, "{:?}", t).unwrap();
+ write!(output, "{t:?}").unwrap();
}
ty::Error(_)
| ty::Infer(_)
@@ -516,7 +516,7 @@ pub fn compute_debuginfo_vtable_name<'tcx>(
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
visited.clear();
- push_generic_params_internal(tcx, trait_ref.substs, &mut vtable_name, &mut visited);
+ push_generic_params_internal(tcx, trait_ref.args, &mut vtable_name, &mut visited);
} else {
vtable_name.push('_');
}
@@ -565,9 +565,9 @@ fn push_disambiguated_special_name(
output: &mut String,
) {
if cpp_like_debuginfo {
- write!(output, "{}${}", label, disambiguator).unwrap();
+ write!(output, "{label}${disambiguator}").unwrap();
} else {
- write!(output, "{{{}#{}}}", label, disambiguator).unwrap();
+ write!(output, "{{{label}#{disambiguator}}}").unwrap();
}
}
@@ -609,21 +609,21 @@ fn push_unqualified_item_name(
fn push_generic_params_internal<'tcx>(
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) -> bool {
- if substs.non_erasable_generics().next().is_none() {
+ if args.non_erasable_generics().next().is_none() {
return false;
}
- debug_assert_eq!(substs, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs));
+ debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
output.push('<');
- for type_parameter in substs.non_erasable_generics() {
+ for type_parameter in args.non_erasable_generics() {
match type_parameter {
GenericArgKind::Type(type_parameter) => {
push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
@@ -651,15 +651,15 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
ty::Int(ity) => {
let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
- write!(output, "{}", val)
+ write!(output, "{val}")
}
ty::Uint(_) => {
let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
- write!(output, "{}", val)
+ write!(output, "{val}")
}
ty::Bool => {
let val = ct.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap();
- write!(output, "{}", val)
+ write!(output, "{val}")
}
_ => {
// If we cannot evaluate the constant to a known type, we fall back
@@ -678,9 +678,9 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
});
if cpp_like_debuginfo(tcx) {
- write!(output, "CONST${:x}", hash_short)
+ write!(output, "CONST${hash_short:x}")
} else {
- write!(output, "{{CONST#{:x}}}", hash_short)
+ write!(output, "{{CONST#{hash_short:x}}}")
}
}
},
@@ -688,16 +688,20 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
.unwrap();
}
-pub fn push_generic_params<'tcx>(tcx: TyCtxt<'tcx>, substs: SubstsRef<'tcx>, output: &mut String) {
+pub fn push_generic_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ args: GenericArgsRef<'tcx>,
+ output: &mut String,
+) {
let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
let mut visited = FxHashSet::default();
- push_generic_params_internal(tcx, substs, output, &mut visited);
+ push_generic_params_internal(tcx, args, output, &mut visited);
}
fn push_closure_or_generator_name<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
- substs: SubstsRef<'tcx>,
+ args: GenericArgsRef<'tcx>,
qualified: bool,
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
@@ -731,10 +735,10 @@ fn push_closure_or_generator_name<'tcx>(
let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
let generics = tcx.generics_of(enclosing_fn_def_id);
- // Truncate the substs to the length of the above generics. This will cut off
+ // Truncate the args to the length of the above generics. This will cut off
// anything closure- or generator-specific.
- let substs = substs.truncate_to(tcx, generics);
- push_generic_params_internal(tcx, substs, output, visited);
+ let args = args.truncate_to(tcx, generics);
+ push_generic_params_internal(tcx, args, output, visited);
}
fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
@@ -748,7 +752,7 @@ fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
}
fn pop_close_angle_bracket(output: &mut String) {
- assert!(output.ends_with('>'), "'output' does not end with '>': {}", output);
+ assert!(output.ends_with('>'), "'output' does not end with '>': {output}");
output.pop();
if output.ends_with(' ') {
output.pop();
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 056b4abd2..b7d8b9b45 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -177,31 +177,31 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::NamelessSection(_, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_section_without_name);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::RelocationWithInvalidSymbol(section, offset) => {
diag =
handler.struct_err(fluent::codegen_ssa_thorin_relocation_with_invalid_symbol);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::MultipleRelocations(section, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_multiple_relocations);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::UnsupportedRelocation(section, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_unsupported_relocation);
diag.set_arg("section", section);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::MissingDwoName(id) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_dwo_name);
- diag.set_arg("id", format!("0x{:08x}", id));
+ diag.set_arg("id", format!("0x{id:08x}"));
diag
}
thorin::Error::NoCompilationUnits => {
@@ -251,7 +251,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::StrAtOffset(_, offset) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_str_at_offset);
- diag.set_arg("offset", format!("0x{:08x}", offset));
+ diag.set_arg("offset", format!("0x{offset:08x}"));
diag
}
thorin::Error::ParseIndex(_, section) => {
@@ -261,7 +261,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::UnitNotInIndex(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_unit_not_in_index);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::RowNotInIndex(_, row) => {
@@ -275,7 +275,7 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::EmptyUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_empty_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::MultipleDebugInfoSection => {
@@ -292,12 +292,12 @@ impl IntoDiagnostic<'_> for ThorinErrorWrapper {
}
thorin::Error::DuplicateUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_duplicate_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::MissingReferencedUnit(unit) => {
diag = handler.struct_err(fluent::codegen_ssa_thorin_missing_referenced_unit);
- diag.set_arg("unit", format!("0x{:08x}", unit));
+ diag.set_arg("unit", format!("0x{unit:08x}"));
diag
}
thorin::Error::NoOutputObjectCreated => {
@@ -353,7 +353,7 @@ impl IntoDiagnostic<'_> for LinkingFailed<'_> {
let contains_undefined_ref = self.escaped_output.contains("undefined reference to");
- diag.note(format!("{:?}", self.command)).note(self.escaped_output.to_string());
+ diag.note(format!("{:?}", self.command)).note(self.escaped_output);
// Try to match an error from OS linkers,
// which we currently have no way to translate.
@@ -456,6 +456,12 @@ pub struct LinkerFileStem;
pub struct StaticLibraryNativeArtifacts;
#[derive(Diagnostic)]
+#[diag(codegen_ssa_static_library_native_artifacts_to_file)]
+pub struct StaticLibraryNativeArtifactsToFile<'a> {
+ pub path: &'a Path,
+}
+
+#[derive(Diagnostic)]
#[diag(codegen_ssa_link_script_unavailable)]
pub struct LinkScriptUnavailable;
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index be4c81638..7bed3fa61 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -2,7 +2,6 @@
#![feature(associated_type_bounds)]
#![feature(box_patterns)]
#![feature(if_let_guard)]
-#![feature(int_roundings)]
#![feature(let_chains)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -150,6 +149,7 @@ impl From<&cstore::NativeLib> for NativeLib {
#[derive(Debug, Encodable, Decodable)]
pub struct CrateInfo {
pub target_cpu: String,
+ pub crate_types: Vec<CrateType>,
pub exported_symbols: FxHashMap<CrateType, Vec<String>>,
pub linked_symbols: FxHashMap<CrateType, Vec<(String, SymbolExportKind)>>,
pub local_crate_name: Symbol,
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index a8b935bd6..12146a54d 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -1,6 +1,6 @@
use crate::traits::*;
-use rustc_middle::ty::{self, subst::GenericArgKind, Ty};
+use rustc_middle::ty::{self, GenericArgKind, Ty};
use rustc_session::config::Lto;
use rustc_symbol_mangling::typeid_for_trait_ref;
use rustc_target::abi::call::FnAbi;
@@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex {
// Load the data pointer from the object.
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
let llty = bx.fn_ptr_backend_type(fn_abi);
- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
&& bx.cx().sess().lto() == Lto::Fat
@@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex {
.unwrap();
let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
- bx.pointercast(func, llty)
+ func
} else {
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
@@ -54,7 +53,6 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", llvtable, self);
let llty = bx.type_isize();
- let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
let ptr = bx.load(llty, gep, usize_align);
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 9d1b3ce82..4f26383ed 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -23,6 +23,8 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
+use std::cmp;
+
// Indicates if we are in the middle of merging a BB's successor into it. This
// can happen when BB jumps directly to its successor and the successor has no
// other predecessors.
@@ -437,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
};
let ty = bx.cast_backend_type(cast_ty);
- let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
- bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
@@ -491,7 +492,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
//
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -531,7 +532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// SO THEN WE CAN USE THE ABOVE CODE.
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -687,7 +688,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// which mentions the offending type, even from a const context.
let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
if let Some(requirement) = panic_intrinsic {
- let ty = instance.unwrap().substs.type_at(0);
+ let ty = instance.unwrap().args.type_at(0);
let do_panic = !bx
.tcx()
@@ -701,13 +702,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
- format!("attempted to instantiate uninhabited type `{}`", ty)
+ format!("attempted to instantiate uninhabited type `{ty}`")
} else if requirement == ValidityRequirement::Zero {
- format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ format!("attempted to zero-initialize type `{ty}`, which is invalid")
} else {
format!(
- "attempted to leave type `{}` uninitialized, which is invalid",
- ty
+ "attempted to leave type `{ty}` uninitialized, which is invalid"
)
}
})
@@ -760,13 +760,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let callee = self.codegen_operand(bx, func);
let (instance, mut llfn) = match *callee.layout.ty.kind() {
- ty::FnDef(def_id, substs) => (
+ ty::FnDef(def_id, args) => (
Some(
ty::Instance::expect_resolve(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.polymorphize(bx.tcx()),
),
@@ -851,9 +851,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(intrinsic) => {
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
- ReturnDest::Nothing => {
- bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
- }
+ ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
ReturnDest::DirectOperand(_) => {
bug!("Cannot use direct operand with an intrinsic call")
@@ -864,11 +862,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.iter()
.enumerate()
.map(|(i, arg)| {
- // The indices passed to simd_shuffle* in the
+ // The indices passed to simd_shuffle in the
// third argument must be constant. This is
// checked by const-qualification, which also
// promotes any complex rvalues to constants.
- if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if i == 2 && intrinsic == sym::simd_shuffle {
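+                            // E.g. roughly `simd_shuffle(a, b, const { [0u32, 4, 1, 5] })`:
+                            // the index array is argument 2 and is already constant here.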
if let mir::Operand::Constant(constant) = arg {
let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
return OperandRef {
@@ -1043,10 +1041,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert_eq!(
fn_abi.args.len(),
mir_args + 1,
- "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
- instance,
- fn_span,
- fn_abi,
+                        "#[track_caller] fns must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}",
);
let location =
self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
@@ -1125,12 +1120,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::InlineAsmOperand::SymFn { ref value } => {
let literal = self.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap();
InlineAsmOperandRef::SymFn { instance }
@@ -1360,36 +1355,58 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(..) => {
+ PassMode::Indirect { attrs, .. } => {
+                        // An indirect argument may have a higher alignment requirement than the type's own alignment.
+                        // This can happen, e.g., when passing types with <4-byte alignment on the stack on x86.
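+                        // E.g. if attrs.pointee_align is 8 but the layout align is only 4,
+                        // the scratch alloca below must be 8-aligned.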
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
}
_ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
},
- Ref(llval, _, align) => {
- if arg.is_indirect() && align < arg.layout.align.abi {
- // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
- // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
- // have scary latent bugs around.
-
- let scratch = PlaceRef::alloca(bx, arg.layout);
- base::memcpy_ty(
- bx,
- scratch.llval,
- scratch.align,
- llval,
- align,
- op.layout,
- MemFlags::empty(),
- );
- (scratch.llval, scratch.align, true)
- } else {
- (llval, align, true)
+ Ref(llval, _, align) => match arg.mode {
+ PassMode::Indirect { attrs, .. } => {
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ if align < required_align {
+                        // For `foo(packed.large_field)`, and for types with <4-byte alignment on x86, the
+                        // required alignment may exceed the type's own alignment, so copy
+                        // to a higher-aligned alloca.
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
}
- }
+ _ => (llval, align, true),
+ },
ZeroSized => match arg.mode {
- PassMode::Indirect { .. } => {
+ PassMode::Indirect { on_stack, .. } => {
+ if on_stack {
+ // It doesn't seem like any target can have `byval` ZSTs, so this assert
+ // is here to replace a would-be untested codepath.
+ bug!("ZST {op:?} passed on stack with abi {arg:?}");
+ }
// Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
// a pointer for `repr(C)` structs even when empty, so get
// one from an `alloca` (which can be left uninitialized).
@@ -1404,8 +1421,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty, _) = &arg.mode {
let llty = bx.cast_backend_type(ty);
- let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
- llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
@@ -1531,7 +1547,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
let llbb = self.llbb(bb);
if base::wants_new_eh_instructions(self.cx.sess()) {
- let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{bb:?}"));
let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
let funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
@@ -1610,7 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// represents that this is a catch-all block.
bx = Bx::build(self.cx, cp_llbb);
let null =
- bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+ bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
let sixty_four = bx.const_i32(64);
funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
} else {
@@ -1651,7 +1667,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match self.cached_llbbs[bb] {
CachedLlbb::None => {
// FIXME(eddyb) only name the block if `fewer_names` is `false`.
- let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{bb:?}"));
self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
Some(llbb)
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 1ee89b3d5..526c16a59 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -42,9 +42,6 @@ pub struct PerLocalVarDebugInfo<'tcx, D> {
/// `.place.projection` from `mir::VarDebugInfo`.
pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
-
- /// `references` from `mir::VarDebugInfo`.
- pub references: u8,
}
#[derive(Clone, Copy, Debug)]
@@ -186,7 +183,11 @@ fn calculate_debuginfo_offset<
} => {
let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
- span_bug!(var.source_info.span, "ConstantIndex on non-array type {:?}", place.layout())
+ span_bug!(
+ var.source_info.span,
+ "ConstantIndex on non-array type {:?}",
+ place.layout()
+ )
};
*offset += stride * index;
place = place.project_constant_index(bx, index);
@@ -319,7 +320,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: ty::List::empty(),
- references: 0,
})
}
} else {
@@ -328,13 +328,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let local_ref = &self.locals[local];
- // FIXME Should the return place be named?
- let name = if bx.sess().fewer_names() || local == mir::RETURN_PLACE {
+ let name = if bx.sess().fewer_names() {
None
} else {
Some(match whole_local_var.or(fallback_var.clone()) {
Some(var) if var.name != kw::Empty => var.name.to_string(),
- _ => format!("{:?}", local),
+ _ => format!("{local:?}"),
})
};
@@ -396,15 +395,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&self,
bx: &mut Bx,
local: mir::Local,
- mut base: PlaceRef<'tcx, Bx::Value>,
+ base: PlaceRef<'tcx, Bx::Value>,
var: PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
) {
let Some(dbg_var) = var.dbg_var else { return };
let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
- let DebugInfoOffset { mut direct_offset, indirect_offsets, result: _ } =
+ let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
calculate_debuginfo_offset(bx, local, &var, base.layout);
- let mut indirect_offsets = &indirect_offsets[..];
// When targeting MSVC, create extra allocas for arguments instead of pointing multiple
// dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
@@ -418,9 +416,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// LLVM can handle simple things but anything more complex than just a direct
// offset or one indirect offset of 0 is too complex for it to generate CV records
// correctly.
- && (direct_offset != Size::ZERO || !matches!(indirect_offsets, [Size::ZERO] | []));
+ && (direct_offset != Size::ZERO || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
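+            // E.g. a variable with no offsets, or a single deref at offset 0, keeps the shared
+            // alloca; anything more complex gets its own spill alloca below.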
+
+ if should_create_individual_allocas {
+ let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
+ calculate_debuginfo_offset(bx, local, &var, base);
- let create_alloca = |bx: &mut Bx, place: PlaceRef<'tcx, Bx::Value>, refcount| {
// Create a variable which will be a pointer to the actual value
let ptr_ty = Ty::new_ptr(
bx.tcx(),
@@ -428,35 +429,30 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
);
let ptr_layout = bx.layout_of(ptr_ty);
let alloca = PlaceRef::alloca(bx, ptr_layout);
- bx.set_var_name(alloca.llval, &format!("{}.ref{}.dbg.spill", var.name, refcount));
+ bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
// Write the pointer to the variable
bx.store(place.llval, alloca.llval, alloca.align);
// Point the debug info to `*alloca` for the current variable
- alloca
- };
-
- if var.references > 0 {
- base = calculate_debuginfo_offset(bx, local, &var, base).result;
-
- // Point the debug info to `&...&base == alloca` for the current variable
- for refcount in 0..var.references {
- base = create_alloca(bx, base, refcount);
- }
-
- direct_offset = Size::ZERO;
- indirect_offsets = &[];
- } else if should_create_individual_allocas {
- let place = calculate_debuginfo_offset(bx, local, &var, base).result;
-
- // Point the debug info to `*alloca` for the current variable
- base = create_alloca(bx, place, 0);
- direct_offset = Size::ZERO;
- indirect_offsets = &[Size::ZERO];
+ bx.dbg_var_addr(
+ dbg_var,
+ dbg_loc,
+ alloca.llval,
+ Size::ZERO,
+ &[Size::ZERO],
+ var.fragment,
+ );
+ } else {
+ bx.dbg_var_addr(
+ dbg_var,
+ dbg_loc,
+ base.llval,
+ direct_offset,
+ &indirect_offsets,
+ var.fragment,
+ );
}
-
- bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, indirect_offsets, None);
}
pub fn debug_introduce_locals(&self, bx: &mut Bx) {
@@ -489,7 +485,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
- let (mut var_ty, var_kind) = match var.value {
+ let (var_ty, var_kind) = match var.value {
mir::VarDebugInfoContents::Place(place) => {
let var_ty = self.monomorphized_place_ty(place.as_ref());
let var_kind = if let Some(arg_index) = var.argument_index
@@ -526,13 +522,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
};
- for _ in 0..var.references {
- var_ty = Ty::new_ptr(
- bx.tcx(),
- ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: var_ty },
- );
- }
-
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
});
@@ -544,7 +533,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: place.projection,
- references: var.references,
});
}
mir::VarDebugInfoContents::Const(c) => {
@@ -586,19 +574,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
let place = fragment.contents;
+ let fragment = if fragment_layout.size == Size::ZERO {
+                        // Fragment is a ZST, so it does not represent anything.
+ continue;
+ } else if fragment_layout.size == var_layout.size {
+ // Fragment covers entire variable, so as far as
+ // DWARF is concerned, it's not really a fragment.
+ None
+ } else {
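+                        // E.g. a 4-byte fragment spilled at byte 8 of a 16-byte variable
+                        // yields the range 8..12.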
+ Some(fragment_start..fragment_start + fragment_layout.size)
+ };
+
per_local[place.local].push(PerLocalVarDebugInfo {
name: var.name,
source_info: var.source_info,
dbg_var,
- fragment: if fragment_layout.size == var_layout.size {
- // Fragment covers entire variable, so as far as
- // DWARF is concerned, it's not really a fragment.
- None
- } else {
- Some(fragment_start..fragment_start + fragment_layout.size)
- },
+ fragment,
projection: place.projection,
- references: var.references,
});
}
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8a65dd593..8821fb21f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) {
let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
- let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
bug!("expected fn item type, found {}", callee_ty);
};
@@ -87,7 +87,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::va_start => bx.va_start(args[0].immediate()),
sym::va_end => bx.va_end(args[0].immediate()),
sym::size_of_val => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
@@ -96,7 +96,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
sym::min_align_of_val => {
- let tp_ty = substs.type_at(0);
+ let tp_ty = fn_args.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
@@ -136,7 +136,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
}
sym::arith_offset => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
let layout = bx.layout_of(ty);
let ptr = args[0].immediate();
let offset = args[1].immediate();
@@ -147,7 +147,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
true,
false,
- substs.type_at(0),
+ fn_args.type_at(0),
args[1].immediate(),
args[0].immediate(),
args[2].immediate(),
@@ -158,7 +158,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
memset_intrinsic(
bx,
false,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -171,7 +171,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
false,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -183,7 +183,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
true,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -194,7 +194,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
memset_intrinsic(
bx,
true,
- substs.type_at(0),
+ fn_args.type_at(0),
args[0].immediate(),
args[1].immediate(),
args[2].immediate(),
@@ -270,7 +270,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::const_allocate => {
// returns a null pointer at runtime.
- bx.const_null(bx.type_i8p())
+ bx.const_null(bx.type_ptr())
}
sym::const_deallocate => {
@@ -307,17 +307,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let Some((success, failure)) = ordering.split_once('_') else {
bx.sess().emit_fatal(errors::AtomicCompareExchange);
};
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let weak = instruction == "cxchgweak";
- let mut dst = args[0].immediate();
+ let dst = args[0].immediate();
let mut cmp = args[1].immediate();
let mut src = args[2].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- dst = bx.pointercast(dst, ptr_llty);
cmp = bx.ptrtoint(cmp, bx.type_isize());
src = bx.ptrtoint(src, bx.type_isize());
}
@@ -338,17 +336,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
"load" => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
- let mut source = args[0].immediate();
+ let source = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first...
let llty = bx.type_isize();
- let ptr_llty = bx.type_ptr_to(llty);
- source = bx.pointercast(source, ptr_llty);
let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
// ... and then cast the result back to a pointer
bx.inttoptr(result, bx.backend_type(layout))
@@ -361,16 +357,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
"store" => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let size = bx.layout_of(ty).size;
let mut val = args[1].immediate();
- let mut ptr = args[0].immediate();
+ let ptr = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
@@ -407,15 +401,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bx.sess().emit_fatal(errors::UnknownAtomicOperation),
};
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
- let mut ptr = args[0].immediate();
+ let ptr = args[0].immediate();
let mut val = args[1].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
- let ptr_llty = bx.type_ptr_to(bx.type_isize());
- ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
@@ -439,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
- let ty = substs.type_at(0);
+ let ty = fn_args.type_at(0);
let pointee_size = bx.layout_of(ty).size;
let a = args[0].immediate();
@@ -470,10 +462,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
- let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
- let ptr = bx.pointercast(result.llval, ptr_llty);
- bx.store(llval, ptr, result.align);
+ if let PassMode::Cast(..) = &fn_abi.ret.mode {
+ bx.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 9ff6a2497..3464f9108 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -159,7 +159,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
- assert!(!instance.substs.has_infer());
+ assert!(!instance.args.has_infer());
let llfn = cx.get_fn(instance);
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 31c293d7c..f90d1a0fc 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,7 +2,6 @@ use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::base;
-use crate::common::TypeKind;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
@@ -132,7 +131,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let alloc_align = alloc.inner().align;
assert_eq!(alloc_align, layout.align.abi);
- let ty = bx.type_ptr_to(bx.cx().backend_type(layout));
let read_scalar = |start, size, s: abi::Scalar, ty| {
let val = alloc
@@ -156,7 +154,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
- let val = read_scalar(Size::ZERO, size, s, ty);
+ let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
OperandRef { val: OperandValue::Immediate(val), layout }
}
Abi::ScalarPair(
@@ -187,7 +185,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let base_addr = bx.static_addr_of(init, alloc_align, None);
let llval = bx.const_ptr_byte_offset(base_addr, offset);
- let llval = bx.const_bitcast(llval, ty);
bx.load_operand(PlaceRef::new_sized(llval, layout))
}
}
@@ -314,38 +311,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) => {
                    // Bools in union fields need to be truncated.
*llval = bx.to_immediate(*llval, field);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- let ty = bx.cx().immediate_backend_type(field);
- if bx.type_kind(ty) == TypeKind::Pointer {
- *llval = bx.pointercast(*llval, ty);
- }
}
(OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
                    // Bools in union fields need to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
- let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
- if bx.type_kind(a_ty) == TypeKind::Pointer {
- *a = bx.pointercast(*a, a_ty);
- }
- if bx.type_kind(b_ty) == TypeKind::Pointer {
- *b = bx.pointercast(*b, b_ty);
- }
}
// Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
(OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
assert!(matches!(self.layout.abi, Abi::Vector { .. }));
- let llty = bx.cx().backend_type(self.layout);
let llfield_ty = bx.cx().backend_type(field);
// Can't bitcast an aggregate, so round trip through memory.
- let lltemp = bx.alloca(llfield_ty, field.align.abi);
- let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+ let llptr = bx.alloca(llfield_ty, field.align.abi);
bx.store(*llval, llptr, field.align.abi);
- *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+ *llval = bx.load(llfield_ty, llptr, field.align.abi);
}
(OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
bug!()
@@ -380,9 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
} else {
- let bty = bx.cx().backend_type(layout);
- let ptr_bty = bx.cx().type_ptr_to(bty);
- OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+ let ptr = bx.cx().type_ptr();
+ OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
}
}
@@ -434,8 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let ty = bx.backend_type(dest.layout);
- let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
- let val = bx.load(ty, ptr, source_align);
+ let val = bx.load(ty, r, source_align);
bx.store_with_flags(val, dest.llval, dest.align, flags);
return;
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index ab493ae5c..e7c3906d9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -48,9 +48,17 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
+ Self::alloca_aligned(bx, layout, layout.align.abi)
+ }
+
+ pub fn alloca_aligned<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ ) -> Self {
assert!(layout.is_sized(), "tried to statically allocate unsized place");
- let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
- Self::new_sized(tmp, layout)
+ let tmp = bx.alloca(bx.cx().backend_type(layout), align);
+ Self::new_sized_aligned(tmp, layout, align)
}
/// Returns a place for an indirect reference to an unsized place.
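
A usage sketch for the new alloca_aligned helper (hypothetical call site; `bx` is any BuilderMethods builder and `layout` a sized TyAndLayout): allocating a stack slot over-aligned relative to the type's natural alignment.

    use rustc_target::abi::Align;

    // Hypothetical: a 16-byte-aligned temporary for `layout`, e.g. to
    // satisfy an ABI that demands more alignment than layout.align.abi.
    let align16 = Align::from_bytes(16).unwrap();
    let tmp = PlaceRef::alloca_aligned(bx, layout, align16);
    // The returned PlaceRef records the requested alignment, so later
    // loads/stores through `tmp` are emitted with 16-byte alignment.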
@@ -107,8 +115,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
// ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
- let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
- bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+ bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
}
Abi::Scalar(_) | Abi::ScalarPair(..) => {
// All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -125,8 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
};
PlaceRef {
- // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
- llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+ llval,
llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
@@ -186,20 +192,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
debug!("struct_field_ptr: DST field offset: {:?}", offset);
- // Cast and adjust pointer.
- let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
- let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+ // Adjust pointer.
+ let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);
- // Finally, cast back to the type expected.
- let ll_fty = bx.cx().backend_type(field);
- debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
- PlaceRef {
- llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
- llextra: self.llextra,
- layout: field,
- align: effective_field_align,
- }
+ PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
}
/// Obtain the actual discriminant of a value.
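
Under opaque pointers this projection reduces to byte arithmetic; a comment-only sketch of the resulting LLVM IR (illustrative):

    // %field = getelementptr i8, ptr %base, i64 %offset
    // No bitcast back to the field's LLVM type is needed: `ptr` is
    // untyped, so the result can be loaded or stored at the field's
    // layout directly.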
@@ -408,11 +404,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-
- // Cast to the appropriate variant struct type.
- let variant_ty = bx.cx().backend_type(downcast.layout);
- downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
downcast
}
@@ -423,11 +414,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = bx.cx().layout_of(ty);
-
- // Cast to the appropriate type.
- let variant_ty = bx.cx().backend_type(downcast.layout);
- downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
downcast
}
@@ -455,7 +441,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::Place(place) => place,
LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
LocalRef::Operand(..) => {
- if place_ref.has_deref() {
+ if place_ref.is_indirect_first_projection() {
base = 1;
let cg_base = self.codegen_consume(
bx,
@@ -507,13 +493,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
));
}
- // Cast the place pointer type to the new
- // array or slice type (`*[%_; new_len]`).
- subslice.llval = bx.pointercast(
- subslice.llval,
- bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
- );
-
subslice
}
mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 956f03d25..07c61df21 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -182,9 +182,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(..) | OperandValue::Pair(..) => {
// When we have immediate(s), the alignment of the source is irrelevant,
// so we can store them using the destination's alignment.
- let llty = bx.backend_type(src.layout);
- let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
- src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+ src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
}
}
}
@@ -222,9 +220,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Ref(ptr, meta, align) => {
debug_assert_eq!(meta, None);
debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
- let cast_bty = bx.backend_type(cast);
- let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
- let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+ let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
Some(bx.load_operand(fake_place).val)
}
OperandValue::ZeroSized => {
@@ -397,8 +393,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> OperandRef<'tcx, Bx::Value> {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
- "cannot codegen {:?} to operand",
- rvalue,
+ "cannot codegen {rvalue:?} to operand",
);
match *rvalue {
@@ -417,12 +412,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
match *operand.layout.ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap()
.polymorphize(bx.cx().tcx());
@@ -433,11 +428,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
match *operand.layout.ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
bx.cx().tcx(),
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
@@ -480,18 +475,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if bx.cx().is_backend_scalar_pair(cast) {
- let data_cast = bx.pointercast(
- data_ptr,
- bx.cx().scalar_pair_element_backend_type(cast, 0, true),
- );
- OperandValue::Pair(data_cast, meta)
+ OperandValue::Pair(data_ptr, meta)
} else {
- // cast to thin-ptr
- // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
- // pointer-cast of that pointer to desired pointer type.
- let llcast_ty = bx.cx().immediate_backend_type(cast);
- let llval = bx.pointercast(data_ptr, llcast_ty);
- OperandValue::Immediate(llval)
+                    // A fat-ptr to thin-ptr cast just extracts the data pointer.
+ OperandValue::Immediate(data_ptr)
}
} else {
bug!("unexpected non-pair operand");
@@ -711,7 +698,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
{
let instance = ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
};
let fn_ptr = bx.get_fn_addr(instance);
let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
@@ -736,13 +723,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
let operand = self.codegen_operand(bx, operand);
- let lloperand = operand.immediate();
+ let val = operand.immediate();
let content_ty = self.monomorphize(content_ty);
let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
- let llty_ptr = bx.cx().backend_type(box_layout);
- let val = bx.pointercast(lloperand, llty_ptr);
OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
}
}
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
index 27da33581..6fbf992ed 100644
--- a/compiler/rustc_codegen_ssa/src/mono_item.rs
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -64,7 +64,7 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
.typeck_body(anon_const.body)
.node_type(anon_const.hir_id);
let instance = match ty.kind() {
- &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ &ty::FnDef(def_id, args) => Instance::new(def_id, args),
_ => span_bug!(*op_sp, "asm sym is not a function"),
};
@@ -138,10 +138,10 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
fn to_raw_string(&self) -> String {
match *self {
MonoItem::Fn(instance) => {
- format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr().addr())
+ format!("Fn({:?}, {})", instance.def, instance.args.as_ptr().addr())
}
- MonoItem::Static(id) => format!("Static({:?})", id),
- MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),
+ MonoItem::Static(id) => format!("Static({id:?})"),
+ MonoItem::GlobalAsm(id) => format!("GlobalAsm({id:?})"),
}
}
}
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index 9e06fec55..baf6b19d3 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -29,7 +29,6 @@ const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("aclass", Some(sym::arm_target_feature)),
("aes", Some(sym::arm_target_feature)),
("crc", Some(sym::arm_target_feature)),
- ("crypto", Some(sym::arm_target_feature)),
("d32", Some(sym::arm_target_feature)),
("dotprod", Some(sym::arm_target_feature)),
("dsp", Some(sym::arm_target_feature)),
@@ -297,6 +296,52 @@ const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
const BPF_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[("alu32", Some(sym::bpf_target_feature))];
+const CSKY_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // tidy-alphabetical-start
+ ("10e60", Some(sym::csky_target_feature)),
+ ("2e3", Some(sym::csky_target_feature)),
+ ("3e3r1", Some(sym::csky_target_feature)),
+ ("3e3r2", Some(sym::csky_target_feature)),
+ ("3e3r3", Some(sym::csky_target_feature)),
+ ("3e7", Some(sym::csky_target_feature)),
+ ("7e10", Some(sym::csky_target_feature)),
+ ("cache", Some(sym::csky_target_feature)),
+ ("doloop", Some(sym::csky_target_feature)),
+ ("dsp1e2", Some(sym::csky_target_feature)),
+ ("dspe60", Some(sym::csky_target_feature)),
+ ("e1", Some(sym::csky_target_feature)),
+ ("e2", Some(sym::csky_target_feature)),
+ ("edsp", Some(sym::csky_target_feature)),
+ ("elrw", Some(sym::csky_target_feature)),
+ ("float1e2", Some(sym::csky_target_feature)),
+ ("float1e3", Some(sym::csky_target_feature)),
+ ("float3e4", Some(sym::csky_target_feature)),
+ ("float7e60", Some(sym::csky_target_feature)),
+ ("floate1", Some(sym::csky_target_feature)),
+ ("hard-tp", Some(sym::csky_target_feature)),
+ ("high-registers", Some(sym::csky_target_feature)),
+ ("hwdiv", Some(sym::csky_target_feature)),
+ ("mp", Some(sym::csky_target_feature)),
+ ("mp1e2", Some(sym::csky_target_feature)),
+ ("nvic", Some(sym::csky_target_feature)),
+ ("trust", Some(sym::csky_target_feature)),
+ ("vdsp2e60f", Some(sym::csky_target_feature)),
+ ("vdspv1", Some(sym::csky_target_feature)),
+ ("vdspv2", Some(sym::csky_target_feature)),
+ // tidy-alphabetical-end
+    // FPU features
+ // tidy-alphabetical-start
+ ("fdivdu", Some(sym::csky_target_feature)),
+ ("fpuv2_df", Some(sym::csky_target_feature)),
+ ("fpuv2_sf", Some(sym::csky_target_feature)),
+ ("fpuv3_df", Some(sym::csky_target_feature)),
+ ("fpuv3_hf", Some(sym::csky_target_feature)),
+ ("fpuv3_hi", Some(sym::csky_target_feature)),
+ ("fpuv3_sf", Some(sym::csky_target_feature)),
+ ("hard-float", Some(sym::csky_target_feature)),
+ ("hard-float-abi", Some(sym::csky_target_feature)),
+ // tidy-alphabetical-end
+];
/// When rustdoc is running, provide a list of all known features so that all their respective
/// primitives may be documented.
///
@@ -312,6 +357,7 @@ pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol
.chain(RISCV_ALLOWED_FEATURES.iter())
.chain(WASM_ALLOWED_FEATURES.iter())
.chain(BPF_ALLOWED_FEATURES.iter())
+ .chain(CSKY_ALLOWED_FEATURES)
.cloned()
}
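
A hedged usage sketch for the new CSKY entries: like other gated target features, they become usable on nightly behind csky_target_feature (feature name taken from the table above; illustrative only):

    // Nightly-only: enable the gate at the crate root, then opt into a
    // feature per function. Calling this function is unsafe unless the
    // caller knows the target CPU actually supports `edsp`.
    #![feature(csky_target_feature)]

    #[target_feature(enable = "edsp")]
    unsafe fn uses_enhanced_dsp() {
        // DSP-accelerated code here
    }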
@@ -321,11 +367,12 @@ pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Opt
"aarch64" => AARCH64_ALLOWED_FEATURES,
"x86" | "x86_64" => X86_ALLOWED_FEATURES,
"hexagon" => HEXAGON_ALLOWED_FEATURES,
- "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
+ "mips" | "mips32r6" | "mips64" | "mips64r6" => MIPS_ALLOWED_FEATURES,
"powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
"riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
"wasm32" | "wasm64" => WASM_ALLOWED_FEATURES,
"bpf" => BPF_ALLOWED_FEATURES,
+ "csky" => CSKY_ALLOWED_FEATURES,
_ => &[],
}
}
@@ -369,13 +416,9 @@ pub fn from_target_feature(
// We allow comma separation to enable multiple features.
target_features.extend(value.as_str().split(',').filter_map(|feature| {
let Some(feature_gate) = supported_target_features.get(feature) else {
- let msg =
- format!("the feature named `{}` is not valid for this target", feature);
+ let msg = format!("the feature named `{feature}` is not valid for this target");
let mut err = tcx.sess.struct_span_err(item.span(), msg);
- err.span_label(
- item.span(),
- format!("`{}` is not valid for this target", feature),
- );
+ err.span_label(item.span(), format!("`{feature}` is not valid for this target"));
if let Some(stripped) = feature.strip_prefix('+') {
let valid = supported_target_features.contains_key(stripped);
if valid {
@@ -401,6 +444,7 @@ pub fn from_target_feature(
Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
+ Some(sym::csky_target_feature) => rust_features.csky_target_feature,
Some(name) => bug!("unknown target feature gate {}", name),
None => true,
};
@@ -409,7 +453,7 @@ pub fn from_target_feature(
&tcx.sess.parse_sess,
feature_gate.unwrap(),
item.span(),
- format!("the target feature `{}` is currently unstable", feature),
+ format!("the target feature `{feature}` is currently unstable"),
)
.emit();
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index b3c9ecf8b..0a02ca6b3 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -23,6 +23,8 @@ use rustc_span::symbol::Symbol;
use rustc_target::abi::call::FnAbi;
use rustc_target::spec::Target;
+use std::fmt;
+
pub trait BackendTypes {
type Value: CodegenObject;
type Function: CodegenObject;
@@ -61,7 +63,7 @@ pub trait CodegenBackend {
fn locale_resource(&self) -> &'static str;
fn init(&self, _sess: &Session) {}
- fn print(&self, _req: PrintRequest, _sess: &Session) {}
+ fn print(&self, _req: &PrintRequest, _out: &mut dyn PrintBackendInfo, _sess: &Session) {}
fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
vec![]
}
@@ -140,15 +142,6 @@ pub trait ExtraBackendMethods:
target_features: &[String],
) -> TargetMachineFactoryFn<Self>;
- fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
- where
- F: FnOnce() -> T,
- F: Send + 'static,
- T: Send + 'static,
- {
- std::thread::spawn(f)
- }
-
fn spawn_named_thread<F, T>(
_time_trace: bool,
name: String,
@@ -162,3 +155,19 @@ pub trait ExtraBackendMethods:
std::thread::Builder::new().name(name).spawn(f)
}
}
+
+pub trait PrintBackendInfo {
+ fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>);
+}
+
+impl PrintBackendInfo for String {
+ fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) {
+ fmt::Write::write_fmt(self, args).unwrap();
+ }
+}
+
+impl dyn PrintBackendInfo + '_ {
+ pub fn write_fmt(&mut self, args: fmt::Arguments<'_>) {
+ self.infallible_write_fmt(args);
+ }
+}
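
Since String implements the trait, callers can collect printed backend info into a buffer; any other sink only needs the one method. A sketch of a hypothetical alternative implementation:

    use std::fmt;

    // Hypothetical sink that streams backend info to stderr instead of
    // buffering it in a String.
    struct StderrSink;

    impl PrintBackendInfo for StderrSink {
        fn infallible_write_fmt(&mut self, args: fmt::Arguments<'_>) {
            eprint!("{args}");
        }
    }

The inherent write_fmt on `dyn PrintBackendInfo` exists so that `write!(out, ...)` works directly on a `&mut dyn PrintBackendInfo` trait object.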
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index d6e9bfce1..4dff9c768 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -5,7 +5,13 @@ use rustc_target::abi;
pub trait ConstMethods<'tcx>: BackendTypes {
// Constant constructors
fn const_null(&self, t: Self::Type) -> Self::Value;
+ /// Generate an uninitialized value (matching uninitialized memory in MIR).
+ /// Whether memory is initialized or not is tracked byte-for-byte.
fn const_undef(&self, t: Self::Type) -> Self::Value;
+ /// Generate a fake value. Poison always affects the entire value, even if just a single byte is
+ /// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code
+ /// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
+ /// poison value.
fn const_poison(&self, t: Self::Type) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
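
A sketch of the undef/poison distinction for backend authors (illustrative; `cx` is any codegen context implementing these traits):

    // undef: bytes are individually uninitialized; merely copying them
    // around is fine, mirroring MIR's byte-for-byte uninit tracking.
    let u = cx.const_undef(cx.type_i32());
    // poison: the entire value is poison and any observing use is UB,
    // so it may only flow on paths that are themselves already UB.
    let p = cx.const_poison(cx.type_i32());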
@@ -30,7 +36,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
- fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
index 8cb58bd4c..728c2bc8c 100644
--- a/compiler/rustc_codegen_ssa/src/traits/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -30,7 +30,9 @@ mod write;
pub use self::abi::AbiBuilderMethods;
pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
-pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::backend::{
+ Backend, BackendTypes, CodegenBackend, ExtraBackendMethods, PrintBackendInfo,
+};
pub use self::builder::{BuilderMethods, OverflowOp};
pub use self::consts::ConstMethods;
pub use self::coverageinfo::CoverageInfoBuilderMethods;
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index e64417e1a..dc3dbd9d8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
fn type_kind(&self, ty: Self::Type) -> TypeKind;
- fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
- fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+ fn type_ptr(&self) -> Self::Type;
+ fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
fn element_type(&self, ty: Self::Type) -> Self::Type;
/// Returns the number of elements in `self` if it is a LLVM vector type.
@@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
}
pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
- fn type_i8p(&self) -> Self::Type {
- self.type_i8p_ext(AddressSpace::DATA)
- }
-
- fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
- self.type_ptr_to_ext(self.type_i8(), address_space)
- }
-
fn type_int(&self) -> Self::Type {
match &self.sess().target.c_int_width[..] {
"16" => self.type_i16(),
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index 9826256a4..ecf5095d8 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -1,5 +1,5 @@
use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
-use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
use crate::{CompiledModule, ModuleCodegen};
use rustc_errors::{FatalError, Handler};
@@ -23,7 +23,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
/// for further optimization.
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
- modules: Vec<FatLTOInput<Self>>,
+ modules: Vec<FatLtoInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError>;
/// Performs thin LTO by performing necessary global analysis and returning two
@@ -35,6 +35,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone {
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
fn print_pass_timings(&self);
+ fn print_statistics(&self);
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,