author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:21 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:18:21 +0000
commit     4e8199b572f2035b7749cba276ece3a26630d23e
tree       f09feeed6a0fe39d027b1908aa63ea6b35e4b631 /compiler/rustc_codegen_ssa
parent     Adding upstream version 1.66.0+dfsg1.
Adding upstream version 1.67.1+dfsg1. (upstream/1.67.1+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa')
26 files changed, 1688 insertions, 1017 deletions
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml index d868e3d56..345174fb5 100644 --- a/compiler/rustc_codegen_ssa/Cargo.toml +++ b/compiler/rustc_codegen_ssa/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" test = false [dependencies] +ar_archive_writer = "0.1.1" bitflags = "1.2.1" cc = "1.0.69" itertools = "0.10.1" diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs index bb76ca5d2..58558fb8c 100644 --- a/compiler/rustc_codegen_ssa/src/back/archive.rs +++ b/compiler/rustc_codegen_ssa/src/back/archive.rs @@ -4,13 +4,22 @@ use rustc_session::cstore::DllImport; use rustc_session::Session; use rustc_span::symbol::Symbol; +use super::metadata::search_for_section; + +pub use ar_archive_writer::get_native_object_symbols; +use ar_archive_writer::{write_archive_to_stream, ArchiveKind, NewArchiveMember}; use object::read::archive::ArchiveFile; +use object::read::macho::FatArch; +use tempfile::Builder as TempFileBuilder; -use std::fmt::Display; +use std::error::Error; use std::fs::File; -use std::io; +use std::io::{self, Write}; use std::path::{Path, PathBuf}; +// Re-exporting for rustc_codegen_llvm::back::archive +pub use crate::errors::{ArchiveBuildFailure, ExtractBundledLibsError, UnknownArchiveKind}; + pub trait ArchiveBuilderBuilder { fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a>; @@ -28,32 +37,38 @@ pub trait ArchiveBuilderBuilder { is_direct_dependency: bool, ) -> PathBuf; - fn extract_bundled_libs( - &self, - rlib: &Path, + fn extract_bundled_libs<'a>( + &'a self, + rlib: &'a Path, outdir: &Path, bundled_lib_file_names: &FxHashSet<Symbol>, - ) -> Result<(), String> { - let message = |msg: &str, e: &dyn Display| format!("{} '{}': {}", msg, &rlib.display(), e); + ) -> Result<(), ExtractBundledLibsError<'_>> { let archive_map = unsafe { - Mmap::map(File::open(rlib).map_err(|e| message("failed to open file", &e))?) - .map_err(|e| message("failed to mmap file", &e))? + Mmap::map( + File::open(rlib) + .map_err(|e| ExtractBundledLibsError::OpenFile { rlib, error: Box::new(e) })?, + ) + .map_err(|e| ExtractBundledLibsError::MmapFile { rlib, error: Box::new(e) })? }; let archive = ArchiveFile::parse(&*archive_map) - .map_err(|e| message("failed to parse archive", &e))?; + .map_err(|e| ExtractBundledLibsError::ParseArchive { rlib, error: Box::new(e) })?; for entry in archive.members() { - let entry = entry.map_err(|e| message("failed to read entry", &e))?; + let entry = entry + .map_err(|e| ExtractBundledLibsError::ReadEntry { rlib, error: Box::new(e) })?; let data = entry .data(&*archive_map) - .map_err(|e| message("failed to get data from archive member", &e))?; + .map_err(|e| ExtractBundledLibsError::ArchiveMember { rlib, error: Box::new(e) })?; let name = std::str::from_utf8(entry.name()) - .map_err(|e| message("failed to convert name", &e))?; + .map_err(|e| ExtractBundledLibsError::ConvertName { rlib, error: Box::new(e) })?; if !bundled_lib_file_names.contains(&Symbol::intern(name)) { continue; // We need to extract only native libraries. 
} + let data = search_for_section(rlib, data, ".bundled_lib").map_err(|e| { + ExtractBundledLibsError::ExtractSection { rlib, error: Box::<dyn Error>::from(e) } + })?; std::fs::write(&outdir.join(&name), data) - .map_err(|e| message("failed to write file", &e))?; + .map_err(|e| ExtractBundledLibsError::WriteFile { rlib, error: Box::new(e) })?; } Ok(()) } @@ -70,3 +85,225 @@ pub trait ArchiveBuilder<'a> { fn build(self: Box<Self>, output: &Path) -> bool; } + +#[must_use = "must call build() to finish building the archive"] +pub struct ArArchiveBuilder<'a> { + sess: &'a Session, + get_object_symbols: + fn(buf: &[u8], f: &mut dyn FnMut(&[u8]) -> io::Result<()>) -> io::Result<bool>, + + src_archives: Vec<(PathBuf, Mmap)>, + // Don't use an `HashMap` here, as the order is important. `lib.rmeta` needs + // to be at the end of an archive in some cases for linkers to not get confused. + entries: Vec<(Vec<u8>, ArchiveEntry)>, +} + +#[derive(Debug)] +enum ArchiveEntry { + FromArchive { archive_index: usize, file_range: (u64, u64) }, + File(PathBuf), +} + +impl<'a> ArArchiveBuilder<'a> { + pub fn new( + sess: &'a Session, + get_object_symbols: fn( + buf: &[u8], + f: &mut dyn FnMut(&[u8]) -> io::Result<()>, + ) -> io::Result<bool>, + ) -> ArArchiveBuilder<'a> { + ArArchiveBuilder { sess, get_object_symbols, src_archives: vec![], entries: vec![] } + } +} + +fn try_filter_fat_archs( + archs: object::read::Result<&[impl FatArch]>, + target_arch: object::Architecture, + archive_path: &Path, + archive_map_data: &[u8], +) -> io::Result<Option<PathBuf>> { + let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() { + Some(a) => a, + None => return Ok(None), + }; + + let (mut new_f, extracted_path) = tempfile::Builder::new() + .suffix(archive_path.file_name().unwrap()) + .tempfile()? + .keep() + .unwrap(); + + new_f.write_all( + desired.data(archive_map_data).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?, + )?; + + Ok(Some(extracted_path)) +} + +pub fn try_extract_macho_fat_archive( + sess: &Session, + archive_path: &Path, +) -> io::Result<Option<PathBuf>> { + let archive_map = unsafe { Mmap::map(File::open(&archive_path)?)? }; + let target_arch = match sess.target.arch.as_ref() { + "aarch64" => object::Architecture::Aarch64, + "x86_64" => object::Architecture::X86_64, + _ => return Ok(None), + }; + + match object::macho::FatHeader::parse(&*archive_map) { + Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC => { + let archs = object::macho::FatHeader::parse_arch32(&*archive_map); + try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map) + } + Ok(h) if h.magic.get(object::endian::BigEndian) == object::macho::FAT_MAGIC_64 => { + let archs = object::macho::FatHeader::parse_arch64(&*archive_map); + try_filter_fat_archs(archs, target_arch, archive_path, &*archive_map) + } + // Not a FatHeader at all, just return None. + _ => Ok(None), + } +} + +impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> { + fn add_archive( + &mut self, + archive_path: &Path, + mut skip: Box<dyn FnMut(&str) -> bool + 'static>, + ) -> io::Result<()> { + let mut archive_path = archive_path.to_path_buf(); + if self.sess.target.llvm_target.contains("-apple-macosx") { + if let Some(new_archive_path) = + try_extract_macho_fat_archive(&self.sess, &archive_path)? 
+ { + archive_path = new_archive_path + } + } + + if self.src_archives.iter().any(|archive| archive.0 == archive_path) { + return Ok(()); + } + + let archive_map = unsafe { Mmap::map(File::open(&archive_path)?)? }; + let archive = ArchiveFile::parse(&*archive_map) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + let archive_index = self.src_archives.len(); + + for entry in archive.members() { + let entry = entry.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + let file_name = String::from_utf8(entry.name().to_vec()) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + if !skip(&file_name) { + self.entries.push(( + file_name.into_bytes(), + ArchiveEntry::FromArchive { archive_index, file_range: entry.file_range() }, + )); + } + } + + self.src_archives.push((archive_path.to_owned(), archive_map)); + Ok(()) + } + + /// Adds an arbitrary file to this archive + fn add_file(&mut self, file: &Path) { + self.entries.push(( + file.file_name().unwrap().to_str().unwrap().to_string().into_bytes(), + ArchiveEntry::File(file.to_owned()), + )); + } + + /// Combine the provided files, rlibs, and native libraries into a single + /// `Archive`. + fn build(self: Box<Self>, output: &Path) -> bool { + let sess = self.sess; + match self.build_inner(output) { + Ok(any_members) => any_members, + Err(e) => sess.emit_fatal(ArchiveBuildFailure { error: e }), + } + } +} + +impl<'a> ArArchiveBuilder<'a> { + fn build_inner(self, output: &Path) -> io::Result<bool> { + let archive_kind = match &*self.sess.target.archive_format { + "gnu" => ArchiveKind::Gnu, + "bsd" => ArchiveKind::Bsd, + "darwin" => ArchiveKind::Darwin, + "coff" => ArchiveKind::Coff, + kind => { + self.sess.emit_fatal(UnknownArchiveKind { kind }); + } + }; + + let mut entries = Vec::new(); + + for (entry_name, entry) in self.entries { + let data = + match entry { + ArchiveEntry::FromArchive { archive_index, file_range } => { + let src_archive = &self.src_archives[archive_index]; + + let data = &src_archive.1 + [file_range.0 as usize..file_range.0 as usize + file_range.1 as usize]; + + Box::new(data) as Box<dyn AsRef<[u8]>> + } + ArchiveEntry::File(file) => unsafe { + Box::new( + Mmap::map(File::open(file).map_err(|err| { + io_error_context("failed to open object file", err) + })?) + .map_err(|err| io_error_context("failed to map object file", err))?, + ) as Box<dyn AsRef<[u8]>> + }, + }; + + entries.push(NewArchiveMember { + buf: data, + get_symbols: self.get_object_symbols, + member_name: String::from_utf8(entry_name).unwrap(), + mtime: 0, + uid: 0, + gid: 0, + perms: 0o644, + }) + } + + // Write to a temporary file first before atomically renaming to the final name. + // This prevents programs (including rustc) from attempting to read a partial archive. + // It also enables writing an archive with the same filename as a dependency on Windows as + // required by a test. + let mut archive_tmpfile = TempFileBuilder::new() + .suffix(".temp-archive") + .tempfile_in(output.parent().unwrap_or_else(|| Path::new(""))) + .map_err(|err| io_error_context("couldn't create a temp file", err))?; + + write_archive_to_stream( + archive_tmpfile.as_file_mut(), + &entries, + true, + archive_kind, + true, + false, + )?; + + let any_entries = !entries.is_empty(); + drop(entries); + // Drop src_archives to unmap all input archives, which is necessary if we want to write the + // output archive to the same location as an input archive on Windows. 
+ drop(self.src_archives); + + archive_tmpfile + .persist(output) + .map_err(|err| io_error_context("failed to rename archive file", err.error))?; + + Ok(any_entries) + } +} + +fn io_error_context(context: &str, err: io::Error) -> io::Error { + io::Error::new(io::ErrorKind::Other, format!("{context}: {err}")) +} diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index 0dc0dee86..fe2e4b36c 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -6,9 +6,9 @@ use rustc_data_structures::memmap::Mmap; use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_errors::{ErrorGuaranteed, Handler}; use rustc_fs_util::fix_windows_verbatim_for_gcc; -use rustc_hir::def_id::CrateNum; +use rustc_hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc_metadata::find_native_static_library; -use rustc_metadata::fs::{emit_metadata, METADATA_FILENAME}; +use rustc_metadata::fs::{emit_wrapper_file, METADATA_FILENAME}; use rustc_middle::middle::dependency_format::Linkage; use rustc_middle::middle::exported_symbols::SymbolExportKind; use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Lto, Strip}; @@ -24,12 +24,12 @@ use rustc_span::symbol::Symbol; use rustc_span::DebuggerVisualizerFile; use rustc_target::spec::crt_objects::{CrtObjects, LinkSelfContainedDefault}; use rustc_target::spec::{Cc, LinkOutputKind, LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy}; -use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo, Target}; +use rustc_target::spec::{RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo}; use super::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; use super::command::Command; use super::linker::{self, Linker}; -use super::metadata::{create_rmeta_file, MetadataPosition}; +use super::metadata::{create_wrapper_file, MetadataPosition}; use super::rpath::{self, RPathConfig}; use crate::{ errors, looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, NativeLib, @@ -44,7 +44,7 @@ use std::borrow::Borrow; use std::cell::OnceCell; use std::collections::BTreeSet; use std::ffi::OsString; -use std::fs::{File, OpenOptions}; +use std::fs::{read, File, OpenOptions}; use std::io::{BufWriter, Write}; use std::ops::Deref; use std::path::{Path, PathBuf}; @@ -102,7 +102,7 @@ pub fn link_binary<'a>( sess, crate_type, outputs, - codegen_results.crate_info.local_crate_name.as_str(), + codegen_results.crate_info.local_crate_name, ); match crate_type { CrateType::Rlib => { @@ -253,7 +253,7 @@ pub fn each_linked_rlib( }; for &cnum in crates { match fmts.get(cnum.as_usize() - 1) { - Some(&Linkage::NotLinked | &Linkage::IncludedFromDylib) => continue, + Some(&Linkage::NotLinked | &Linkage::Dynamic | &Linkage::IncludedFromDylib) => continue, Some(_) => {} None => return Err(errors::LinkRlibError::MissingFormat), } @@ -292,8 +292,8 @@ fn link_rlib<'a>( let trailing_metadata = match flavor { RlibFlavor::Normal => { let (metadata, metadata_position) = - create_rmeta_file(sess, codegen_results.metadata.raw_data()); - let metadata = emit_metadata(sess, &metadata, tmpdir); + create_wrapper_file(sess, b".rmeta".to_vec(), codegen_results.metadata.raw_data()); + let metadata = emit_wrapper_file(sess, &metadata, tmpdir, METADATA_FILENAME); match metadata_position { MetadataPosition::First => { // Most of the time metadata in rlib files is wrapped in a "dummy" object @@ -376,12 +376,14 @@ fn link_rlib<'a>( let location = find_native_static_library(name.as_str(), lib.verbatim, 
&lib_search_paths, sess); if sess.opts.unstable_opts.packed_bundled_libs && flavor == RlibFlavor::Normal { - packed_bundled_libs.push(find_native_static_library( - lib.filename.unwrap().as_str(), - Some(true), - &lib_search_paths, - sess, - )); + let filename = lib.filename.unwrap(); + let lib_path = + find_native_static_library(filename.as_str(), true, &lib_search_paths, sess); + let src = read(lib_path) + .map_err(|e| sess.emit_fatal(errors::ReadFileError { message: e }))?; + let (data, _) = create_wrapper_file(sess, b".bundled_lib".to_vec(), &src); + let wrapper_file = emit_wrapper_file(sess, &data, tmpdir, filename.as_str()); + packed_bundled_libs.push(wrapper_file); continue; } ab.add_archive(&location, Box::new(|_| false)).unwrap_or_else(|error| { @@ -459,7 +461,7 @@ fn collate_raw_dylibs<'a, 'b>( for lib in used_libraries { if lib.kind == NativeLibKind::RawDylib { - let ext = if matches!(lib.verbatim, Some(true)) { "" } else { ".dll" }; + let ext = if lib.verbatim { "" } else { ".dll" }; let name = format!("{}{}", lib.name.expect("unnamed raw-dylib library"), ext); let imports = dylib_table.entry(name.clone()).or_default(); for import in &lib.dll_imports { @@ -670,8 +672,7 @@ fn link_dwarf_object<'a>( thorin::MissingReferencedObjectBehaviour::Skip, )?; - let output = package.finish()?.write()?; - let mut output_stream = BufWriter::new( + let output_stream = BufWriter::new( OpenOptions::new() .read(true) .write(true) @@ -679,8 +680,10 @@ fn link_dwarf_object<'a>( .truncate(true) .open(dwp_out_filename)?, ); - output_stream.write_all(&output)?; - output_stream.flush()?; + let mut output_stream = object::write::StreamingBuffer::new(output_stream); + package.finish()?.emit(&mut output_stream)?; + output_stream.result()?; + output_stream.into_inner().flush()?; Ok(()) }) { @@ -919,29 +922,17 @@ fn link_natively<'a>( ) .is_some(); - sess.note_without_error("`link.exe` returned an unexpected error"); + sess.emit_note(errors::LinkExeUnexpectedError); if is_vs_installed && has_linker { // the linker is broken - sess.note_without_error( - "the Visual Studio build tools may need to be repaired \ - using the Visual Studio installer", - ); - sess.note_without_error( - "or a necessary component may be missing from the \ - \"C++ build tools\" workload", - ); + sess.emit_note(errors::RepairVSBuildTools); + sess.emit_note(errors::MissingCppBuildToolComponent); } else if is_vs_installed { // the linker is not installed - sess.note_without_error( - "in the Visual Studio installer, ensure the \ - \"C++ build tools\" workload is selected", - ); + sess.emit_note(errors::SelectCppBuildToolWorkload); } else { // visual studio is not installed - sess.note_without_error( - "you may need to install Visual Studio build tools with the \ - \"C++ build tools\" workload", - ); + sess.emit_note(errors::VisualStudioNotInstalled); } } } @@ -954,35 +945,20 @@ fn link_natively<'a>( Err(e) => { let linker_not_found = e.kind() == io::ErrorKind::NotFound; - let mut linker_error = { - if linker_not_found { - sess.struct_err(&format!("linker `{}` not found", linker_path.display())) - } else { - sess.struct_err(&format!( - "could not exec the linker `{}`", - linker_path.display() - )) - } - }; - - linker_error.note(&e.to_string()); - - if !linker_not_found { - linker_error.note(&format!("{:?}", &cmd)); + if linker_not_found { + sess.emit_err(errors::LinkerNotFound { linker_path, error: e }); + } else { + sess.emit_err(errors::UnableToExeLinker { + linker_path, + error: e, + command_formatted: format!("{:?}", &cmd), + }); } - 
linker_error.emit(); - if sess.target.is_like_msvc && linker_not_found { - sess.note_without_error( - "the msvc targets depend on the msvc linker \ - but `link.exe` was not found", - ); - sess.note_without_error( - "please ensure that Visual Studio 2017 or later, or Build Tools \ - for Visual Studio were installed with the Visual C++ option.", - ); - sess.note_without_error("VS Code is a different product, and is not sufficient."); + sess.emit_note(errors::MsvcMissingLinker); + sess.emit_note(errors::CheckInstalledVisualStudio); + sess.emit_note(errors::UnsufficientVSCodeProduct); } sess.abort_if_errors(); } @@ -1007,15 +983,13 @@ fn link_natively<'a>( if !prog.status.success() { let mut output = prog.stderr.clone(); output.extend_from_slice(&prog.stdout); - sess.struct_warn(&format!( - "processing debug info with `dsymutil` failed: {}", - prog.status - )) - .note(&escape_string(&output)) - .emit(); + sess.emit_warning(errors::ProcessingDymutilFailed { + status: prog.status, + output: escape_string(&output), + }); } } - Err(e) => sess.fatal(&format!("unable to run `dsymutil`: {}", e)), + Err(error) => sess.emit_fatal(errors::UnableToRunDsymutil { error }), } } @@ -1092,15 +1066,14 @@ fn strip_symbols_with_external_utility<'a>( if !prog.status.success() { let mut output = prog.stderr.clone(); output.extend_from_slice(&prog.stdout); - sess.struct_warn(&format!( - "stripping debug info with `{}` failed: {}", - util, prog.status - )) - .note(&escape_string(&output)) - .emit(); + sess.emit_warning(errors::StrippingDebugInfoFailed { + util, + status: prog.status, + output: escape_string(&output), + }); } } - Err(e) => sess.fatal(&format!("unable to run `{}`: {}", util, e)), + Err(error) => sess.emit_fatal(errors::UnableToRun { util, error }), } } @@ -1153,7 +1126,8 @@ fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) { if path.exists() { return session_tlib; } else { - let default_sysroot = filesearch::get_or_default_sysroot(); + let default_sysroot = + filesearch::get_or_default_sysroot().expect("Failed finding sysroot"); let default_tlib = filesearch::make_target_lib_path( &default_sysroot, sess.opts.target_triple.triple(), @@ -1201,7 +1175,7 @@ pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum)) } -// This functions tries to determine the appropriate linker (and corresponding LinkerFlavor) to use +/// This functions tries to determine the appropriate linker (and corresponding LinkerFlavor) to use pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { fn infer_from( sess: &Session, @@ -1251,7 +1225,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { )), (Some(linker), None) => { let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker") + sess.emit_fatal(errors::LinkerFileStem); }); let flavor = if stem == "emcc" { @@ -1357,7 +1331,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) { NativeLibKind::Static { bundle: Some(false), .. } | NativeLibKind::Dylib { .. 
} | NativeLibKind::Unspecified => { - let verbatim = lib.verbatim.unwrap_or(false); + let verbatim = lib.verbatim; if sess.target.is_like_msvc { Some(format!("{}{}", name, if verbatim { "" } else { ".lib" })) } else if sess.target.linker_flavor.is_gnu() { @@ -1378,13 +1352,9 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) { }) .collect(); if !lib_args.is_empty() { - sess.note_without_error( - "Link against the following native artifacts when linking \ - against this static library. The order and any duplication \ - can be significant on some platforms.", - ); + sess.emit_note(errors::StaticLibraryNativeArtifacts); // Prefix for greppability - sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" "))); + sess.emit_note(errors::NativeStaticLibs { arguments: lib_args.join(" ") }); } } @@ -1615,6 +1585,9 @@ fn detect_self_contained_mingw(sess: &Session) -> bool { /// We only provide such support for a very limited number of targets. fn self_contained(sess: &Session, crate_type: CrateType) -> bool { if let Some(self_contained) = sess.opts.cg.link_self_contained { + if sess.target.link_self_contained == LinkSelfContainedDefault::False { + sess.emit_err(errors::UnsupportedLinkSelfContained); + } return self_contained; } @@ -1688,14 +1661,14 @@ fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_ty match (crate_type, &sess.target.link_script) { (CrateType::Cdylib | CrateType::Executable, Some(script)) => { if !sess.target.linker_flavor.is_gnu() { - sess.fatal("can only use link script when linking with GNU-like linker"); + sess.emit_fatal(errors::LinkScriptUnavailable); } let file_name = ["rustc", &sess.target.llvm_target, "linkfile.ld"].join("-"); let path = tmpdir.join(file_name); - if let Err(e) = fs::write(&path, script.as_ref()) { - sess.fatal(&format!("failed to write link script to {}: {}", path.display(), e)); + if let Err(error) = fs::write(&path, script.as_ref()) { + sess.emit_fatal(errors::LinkScriptWriteFailure { path, error }); } cmd.arg("--script"); @@ -1841,8 +1814,8 @@ fn add_linked_symbol_object( let path = tmpdir.join("symbols.o"); let result = std::fs::write(&path, file.write().unwrap()); - if let Err(e) = result { - sess.fatal(&format!("failed to write {}: {}", path.display(), e)); + if let Err(error) = result { + sess.emit_fatal(errors::FailedToWrite { path, error }); } cmd.add_object(&path); } @@ -2040,15 +2013,9 @@ fn linker_with_args<'a>( cmd.add_as_needed(); // Local native libraries of all kinds. - // - // If `-Zlink-native-libraries=false` is set, then the assumption is that an - // external build system already has the native dependencies defined, and it - // will provide them to the linker itself. - if sess.opts.unstable_opts.link_native_libraries { - add_local_native_libraries(cmd, sess, codegen_results); - } + add_local_native_libraries(cmd, sess, archive_builder_builder, codegen_results, tmpdir); - // Upstream rust libraries and their (possibly bundled) static native libraries. + // Upstream rust crates and their non-dynamic native libraries. add_upstream_rust_crates( cmd, sess, @@ -2059,13 +2026,7 @@ fn linker_with_args<'a>( ); // Dynamic native libraries from upstream crates. - // - // FIXME: Merge this to `add_upstream_rust_crates` so that all native libraries are linked - // together with their respective upstream crates, and in their originally specified order. - // This may be slightly breaking due to our use of `--as-needed` and needs a crater run. 
- if sess.opts.unstable_opts.link_native_libraries { - add_upstream_native_libraries(cmd, sess, codegen_results); - } + add_upstream_native_libraries(cmd, sess, archive_builder_builder, codegen_results, tmpdir); // Link with the import library generated for any raw-dylib functions. for (raw_dylib_name, raw_dylib_imports) in @@ -2299,56 +2260,56 @@ fn collect_natvis_visualizers( visualizer_paths.push(visualizer_out_file); } Err(error) => { - sess.warn( - format!( - "Unable to write debugger visualizer file `{}`: {} ", - visualizer_out_file.display(), - error - ) - .as_str(), - ); + sess.emit_warning(errors::UnableToWriteDebuggerVisualizer { + path: visualizer_out_file, + error, + }); } }; } visualizer_paths } -/// # Native library linking -/// -/// User-supplied library search paths (-L on the command line). These are the same paths used to -/// find Rust crates, so some of them may have been added already by the previous crate linking -/// code. This only allows them to be found at compile time so it is still entirely up to outside -/// forces to make sure that library can be found at runtime. -/// -/// Also note that the native libraries linked here are only the ones located in the current crate. -/// Upstream crates with native library dependencies may have their native library pulled in above. -fn add_local_native_libraries( +fn add_native_libs_from_crate( cmd: &mut dyn Linker, sess: &Session, + archive_builder_builder: &dyn ArchiveBuilderBuilder, codegen_results: &CodegenResults, + tmpdir: &Path, + search_paths: &OnceCell<Vec<PathBuf>>, + bundled_libs: &FxHashSet<Symbol>, + cnum: CrateNum, + link_static: bool, + link_dynamic: bool, ) { - let filesearch = sess.target_filesearch(PathKind::All); - for search_path in filesearch.search_paths() { - match search_path.kind { - PathKind::Framework => { - cmd.framework_path(&search_path.dir); - } - _ => { - cmd.include_path(&fix_windows_verbatim_for_gcc(&search_path.dir)); - } - } + if !sess.opts.unstable_opts.link_native_libraries { + // If `-Zlink-native-libraries=false` is set, then the assumption is that an + // external build system already has the native dependencies defined, and it + // will provide them to the linker itself. + return; } - let relevant_libs = - codegen_results.crate_info.used_libraries.iter().filter(|l| relevant_lib(sess, l)); + if link_static && cnum != LOCAL_CRATE && !bundled_libs.is_empty() { + // If rlib contains native libs as archives, unpack them to tmpdir. + let rlib = &codegen_results.crate_info.used_crate_source[&cnum].rlib.as_ref().unwrap().0; + archive_builder_builder + .extract_bundled_libs(rlib, tmpdir, &bundled_libs) + .unwrap_or_else(|e| sess.emit_fatal(e)); + } - let search_path = OnceCell::new(); - let mut last = (None, NativeLibKind::Unspecified, None); - for lib in relevant_libs { + let native_libs = match cnum { + LOCAL_CRATE => &codegen_results.crate_info.used_libraries, + _ => &codegen_results.crate_info.native_libraries[&cnum], + }; + + let mut last = (None, NativeLibKind::Unspecified, false); + for lib in native_libs { let Some(name) = lib.name else { continue; }; - let name = name.as_str(); + if !relevant_lib(sess, lib) { + continue; + } // Skip if this library is the same as the last. 
last = if (lib.name, lib.kind, lib.verbatim) == last { @@ -2357,46 +2318,110 @@ fn add_local_native_libraries( (lib.name, lib.kind, lib.verbatim) }; - let verbatim = lib.verbatim.unwrap_or(false); + let name = name.as_str(); + let verbatim = lib.verbatim; match lib.kind { + NativeLibKind::Static { bundle, whole_archive } => { + if link_static { + let bundle = bundle.unwrap_or(true); + let whole_archive = whole_archive == Some(true) + // Backward compatibility case: this can be a rlib (so `+whole-archive` + // cannot be added explicitly if necessary, see the error in `fn link_rlib`) + // compiled as an executable due to `--test`. Use whole-archive implicitly, + // like before the introduction of native lib modifiers. + || (whole_archive == None + && bundle + && cnum == LOCAL_CRATE + && sess.opts.test); + + if bundle && cnum != LOCAL_CRATE { + if let Some(filename) = lib.filename { + // If rlib contains native libs as archives, they are unpacked to tmpdir. + let path = tmpdir.join(filename.as_str()); + if whole_archive { + cmd.link_whole_rlib(&path); + } else { + cmd.link_rlib(&path); + } + } + } else { + if whole_archive { + cmd.link_whole_staticlib( + name, + verbatim, + &search_paths.get_or_init(|| archive_search_paths(sess)), + ); + } else { + cmd.link_staticlib(name, verbatim) + } + } + } + } NativeLibKind::Dylib { as_needed } => { - cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true)) + if link_dynamic { + cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true)) + } } - NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true), - NativeLibKind::Framework { as_needed } => { - cmd.link_framework(name, as_needed.unwrap_or(true)) + NativeLibKind::Unspecified => { + if link_dynamic { + cmd.link_dylib(name, verbatim, true); + } } - NativeLibKind::Static { whole_archive, bundle, .. } => { - if whole_archive == Some(true) - // Backward compatibility case: this can be a rlib (so `+whole-archive` cannot - // be added explicitly if necessary, see the error in `fn link_rlib`) compiled - // as an executable due to `--test`. Use whole-archive implicitly, like before - // the introduction of native lib modifiers. - || (whole_archive == None && bundle != Some(false) && sess.opts.test) - { - cmd.link_whole_staticlib( - name, - verbatim, - &search_path.get_or_init(|| archive_search_paths(sess)), - ); - } else { - cmd.link_staticlib(name, verbatim) + NativeLibKind::Framework { as_needed } => { + if link_dynamic { + cmd.link_framework(name, as_needed.unwrap_or(true)) } } NativeLibKind::RawDylib => { - // Ignore RawDylib here, they are handled separately in linker_with_args(). + // Handled separately in `linker_with_args`. } NativeLibKind::LinkArg => { - cmd.arg(name); + if link_static { + cmd.arg(name); + } } } } } -/// # Linking Rust crates and their non-bundled static libraries -/// -/// Rust crates are not considered at all when creating an rlib output. All dependencies will be -/// linked when producing the final output (instead of the intermediate rlib version). +fn add_local_native_libraries( + cmd: &mut dyn Linker, + sess: &Session, + archive_builder_builder: &dyn ArchiveBuilderBuilder, + codegen_results: &CodegenResults, + tmpdir: &Path, +) { + if sess.opts.unstable_opts.link_native_libraries { + // User-supplied library search paths (-L on the command line). These are the same paths + // used to find Rust crates, so some of them may have been added already by the previous + // crate linking code. 
This only allows them to be found at compile time so it is still + // entirely up to outside forces to make sure that library can be found at runtime. + for search_path in sess.target_filesearch(PathKind::All).search_paths() { + match search_path.kind { + PathKind::Framework => cmd.framework_path(&search_path.dir), + _ => cmd.include_path(&fix_windows_verbatim_for_gcc(&search_path.dir)), + } + } + } + + let search_paths = OnceCell::new(); + // All static and dynamic native library dependencies are linked to the local crate. + let link_static = true; + let link_dynamic = true; + add_native_libs_from_crate( + cmd, + sess, + archive_builder_builder, + codegen_results, + tmpdir, + &search_paths, + &Default::default(), + LOCAL_CRATE, + link_static, + link_dynamic, + ); +} + fn add_upstream_rust_crates<'a>( cmd: &mut dyn Linker, sess: &'a Session, @@ -2412,7 +2437,6 @@ fn add_upstream_rust_crates<'a>( // Linking to a rlib involves just passing it to the linker (the linker // will slurp up the object files inside), and linking to a dynamic library // involves just passing the right -l flag. - let (_, data) = codegen_results .crate_info .dependency_formats @@ -2420,346 +2444,234 @@ fn add_upstream_rust_crates<'a>( .find(|(ty, _)| *ty == crate_type) .expect("failed to find crate type in dependency format list"); - // Invoke get_used_crates to ensure that we get a topological sorting of - // crates. - let deps = &codegen_results.crate_info.used_crates; - - let mut compiler_builtins = None; - let search_path = OnceCell::new(); - - for &cnum in deps.iter() { - // We may not pass all crates through to the linker. Some crates may - // appear statically in an existing dylib, meaning we'll pick up all the - // symbols from the dylib. - let src = &codegen_results.crate_info.used_crate_source[&cnum]; - match data[cnum.as_usize() - 1] { - _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => { - add_static_crate( - cmd, - sess, - archive_builder_builder, - codegen_results, - tmpdir, - cnum, - &Default::default(), - ); - } - // compiler-builtins are always placed last to ensure that they're - // linked correctly. - _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => { - assert!(compiler_builtins.is_none()); - compiler_builtins = Some(cnum); - } - Linkage::NotLinked | Linkage::IncludedFromDylib => {} - Linkage::Static => { - let bundled_libs = if sess.opts.unstable_opts.packed_bundled_libs { - codegen_results.crate_info.native_libraries[&cnum] + let search_paths = OnceCell::new(); + for &cnum in &codegen_results.crate_info.used_crates { + // We may not pass all crates through to the linker. Some crates may appear statically in + // an existing dylib, meaning we'll pick up all the symbols from the dylib. + // We must always link crates `compiler_builtins` and `profiler_builtins` statically. + // Even if they were already included into a dylib + // (e.g. `libstd` when `-C prefer-dynamic` is used). + // FIXME: `dependency_formats` can report `profiler_builtins` as `NotLinked` for some + // reason, it shouldn't do that because `profiler_builtins` should indeed be linked. 
+ let linkage = data[cnum.as_usize() - 1]; + let link_static_crate = linkage == Linkage::Static + || (linkage == Linkage::IncludedFromDylib || linkage == Linkage::NotLinked) + && (codegen_results.crate_info.compiler_builtins == Some(cnum) + || codegen_results.crate_info.profiler_runtime == Some(cnum)); + + let mut bundled_libs = Default::default(); + match linkage { + Linkage::Static | Linkage::IncludedFromDylib | Linkage::NotLinked => { + if link_static_crate { + bundled_libs = codegen_results.crate_info.native_libraries[&cnum] .iter() .filter_map(|lib| lib.filename) - .collect::<FxHashSet<_>>() - } else { - Default::default() - }; - add_static_crate( - cmd, - sess, - archive_builder_builder, - codegen_results, - tmpdir, - cnum, - &bundled_libs, - ); - - // Link static native libs with "-bundle" modifier only if the crate they originate from - // is being linked statically to the current crate. If it's linked dynamically - // or is an rlib already included via some other dylib crate, the symbols from - // native libs will have already been included in that dylib. - // - // If `-Zlink-native-libraries=false` is set, then the assumption is that an - // external build system already has the native dependencies defined, and it - // will provide them to the linker itself. - if sess.opts.unstable_opts.link_native_libraries { - if sess.opts.unstable_opts.packed_bundled_libs { - // If rlib contains native libs as archives, unpack them to tmpdir. - let rlib = &src.rlib.as_ref().unwrap().0; - archive_builder_builder - .extract_bundled_libs(rlib, tmpdir, &bundled_libs) - .unwrap_or_else(|e| sess.fatal(e)); - } - - let mut last = (None, NativeLibKind::Unspecified, None); - for lib in &codegen_results.crate_info.native_libraries[&cnum] { - let Some(name) = lib.name else { - continue; - }; - let name = name.as_str(); - if !relevant_lib(sess, lib) { - continue; - } - - // Skip if this library is the same as the last. - last = if (lib.name, lib.kind, lib.verbatim) == last { - continue; - } else { - (lib.name, lib.kind, lib.verbatim) - }; - - match lib.kind { - NativeLibKind::Static { - bundle: Some(false), - whole_archive: Some(true), - } => { - cmd.link_whole_staticlib( - name, - lib.verbatim.unwrap_or(false), - search_path.get_or_init(|| archive_search_paths(sess)), - ); - } - NativeLibKind::Static { - bundle: Some(false), - whole_archive: Some(false) | None, - } => { - // HACK/FIXME: Fixup a circular dependency between libgcc and libc - // with glibc. This logic should be moved to the libc crate. - if sess.target.os == "linux" - && sess.target.env == "gnu" - && name == "c" - { - cmd.link_staticlib("gcc", false); - } - cmd.link_staticlib(name, lib.verbatim.unwrap_or(false)); - } - NativeLibKind::LinkArg => { - cmd.arg(name); - } - NativeLibKind::Dylib { .. } - | NativeLibKind::Framework { .. } - | NativeLibKind::Unspecified - | NativeLibKind::RawDylib => {} - NativeLibKind::Static { bundle: Some(true) | None, whole_archive } => { - if sess.opts.unstable_opts.packed_bundled_libs { - // If rlib contains native libs as archives, they are unpacked to tmpdir. 
- let path = tmpdir.join(lib.filename.unwrap().as_str()); - if whole_archive == Some(true) { - cmd.link_whole_rlib(&path); - } else { - cmd.link_rlib(&path); - } - } - } - } - } + .collect(); + add_static_crate( + cmd, + sess, + archive_builder_builder, + codegen_results, + tmpdir, + cnum, + &bundled_libs, + ); } } - Linkage::Dynamic => add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0), + Linkage::Dynamic => { + let src = &codegen_results.crate_info.used_crate_source[&cnum]; + add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0); + } } - } - // compiler-builtins are always placed last to ensure that they're - // linked correctly. - // We must always link the `compiler_builtins` crate statically. Even if it - // was already "included" in a dylib (e.g., `libstd` when `-C prefer-dynamic` - // is used) - if let Some(cnum) = compiler_builtins { - add_static_crate( + // Static libraries are linked for a subset of linked upstream crates. + // 1. If the upstream crate is a directly linked rlib then we must link the native library + // because the rlib is just an archive. + // 2. If the upstream crate is a dylib or a rlib linked through dylib, then we do not link + // the native library because it is already linked into the dylib, and even if + // inline/const/generic functions from the dylib can refer to symbols from the native + // library, those symbols should be exported and available from the dylib anyway. + // 3. Libraries bundled into `(compiler,profiler)_builtins` are special, see above. + let link_static = link_static_crate; + // Dynamic libraries are not linked here, see the FIXME in `add_upstream_native_libraries`. + let link_dynamic = false; + add_native_libs_from_crate( cmd, sess, archive_builder_builder, codegen_results, tmpdir, + &search_paths, + &bundled_libs, cnum, - &Default::default(), + link_static, + link_dynamic, ); } +} - // Converts a library file-stem into a cc -l argument - fn unlib<'a>(target: &Target, stem: &'a str) -> &'a str { - if stem.starts_with("lib") && !target.is_like_windows { &stem[3..] } else { stem } +fn add_upstream_native_libraries( + cmd: &mut dyn Linker, + sess: &Session, + archive_builder_builder: &dyn ArchiveBuilderBuilder, + codegen_results: &CodegenResults, + tmpdir: &Path, +) { + let search_path = OnceCell::new(); + for &cnum in &codegen_results.crate_info.used_crates { + // Static libraries are not linked here, they are linked in `add_upstream_rust_crates`. + // FIXME: Merge this function to `add_upstream_rust_crates` so that all native libraries + // are linked together with their respective upstream crates, and in their originally + // specified order. This is slightly breaking due to our use of `--as-needed` (see crater + // results in https://github.com/rust-lang/rust/pull/102832#issuecomment-1279772306). + let link_static = false; + // Dynamic libraries are linked for all linked upstream crates. + // 1. If the upstream crate is a directly linked rlib then we must link the native library + // because the rlib is just an archive. + // 2. If the upstream crate is a dylib or a rlib linked through dylib, then we have to link + // the native library too because inline/const/generic functions from the dylib can refer + // to symbols from the native library, so the native library providing those symbols should + // be available when linking our final binary. 
+ let link_dynamic = true; + add_native_libs_from_crate( + cmd, + sess, + archive_builder_builder, + codegen_results, + tmpdir, + &search_path, + &Default::default(), + cnum, + link_static, + link_dynamic, + ); } +} - // Adds the static "rlib" versions of all crates to the command line. - // There's a bit of magic which happens here specifically related to LTO, - // namely that we remove upstream object files. - // - // When performing LTO, almost(*) all of the bytecode from the upstream - // libraries has already been included in our object file output. As a - // result we need to remove the object files in the upstream libraries so - // the linker doesn't try to include them twice (or whine about duplicate - // symbols). We must continue to include the rest of the rlib, however, as - // it may contain static native libraries which must be linked in. - // - // (*) Crates marked with `#![no_builtins]` don't participate in LTO and - // their bytecode wasn't included. The object files in those libraries must - // still be passed to the linker. - // - // Note, however, that if we're not doing LTO we can just pass the rlib - // blindly to the linker (fast) because it's fine if it's not actually - // included as we're at the end of the dependency chain. - fn add_static_crate<'a>( - cmd: &mut dyn Linker, - sess: &'a Session, - archive_builder_builder: &dyn ArchiveBuilderBuilder, - codegen_results: &CodegenResults, - tmpdir: &Path, - cnum: CrateNum, - bundled_lib_file_names: &FxHashSet<Symbol>, - ) { - let src = &codegen_results.crate_info.used_crate_source[&cnum]; - let cratepath = &src.rlib.as_ref().unwrap().0; - - let mut link_upstream = |path: &Path| { - cmd.link_rlib(&fix_windows_verbatim_for_gcc(path)); - }; - - // See the comment above in `link_staticlib` and `link_rlib` for why if - // there's a static library that's not relevant we skip all object - // files. - let native_libs = &codegen_results.crate_info.native_libraries[&cnum]; - let skip_native = native_libs.iter().any(|lib| { - matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. }) - && !relevant_lib(sess, lib) - }); - - if (!are_upstream_rust_objects_already_included(sess) - || ignored_for_lto(sess, &codegen_results.crate_info, cnum)) - && !skip_native - { - link_upstream(cratepath); - return; - } - - let dst = tmpdir.join(cratepath.file_name().unwrap()); - let name = cratepath.file_name().unwrap().to_str().unwrap(); - let name = &name[3..name.len() - 5]; // chop off lib/.rlib - let bundled_lib_file_names = bundled_lib_file_names.clone(); - - sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| { - let canonical_name = name.replace('-', "_"); - let upstream_rust_objects_already_included = - are_upstream_rust_objects_already_included(sess); - let is_builtins = sess.target.no_builtins - || !codegen_results.crate_info.is_no_builtins.contains(&cnum); - - let mut archive = archive_builder_builder.new_archive_builder(sess); - if let Err(e) = archive.add_archive( - cratepath, - Box::new(move |f| { - if f == METADATA_FILENAME { - return true; - } +// Adds the static "rlib" versions of all crates to the command line. +// There's a bit of magic which happens here specifically related to LTO, +// namely that we remove upstream object files. +// +// When performing LTO, almost(*) all of the bytecode from the upstream +// libraries has already been included in our object file output. 
As a +// result we need to remove the object files in the upstream libraries so +// the linker doesn't try to include them twice (or whine about duplicate +// symbols). We must continue to include the rest of the rlib, however, as +// it may contain static native libraries which must be linked in. +// +// (*) Crates marked with `#![no_builtins]` don't participate in LTO and +// their bytecode wasn't included. The object files in those libraries must +// still be passed to the linker. +// +// Note, however, that if we're not doing LTO we can just pass the rlib +// blindly to the linker (fast) because it's fine if it's not actually +// included as we're at the end of the dependency chain. +fn add_static_crate<'a>( + cmd: &mut dyn Linker, + sess: &'a Session, + archive_builder_builder: &dyn ArchiveBuilderBuilder, + codegen_results: &CodegenResults, + tmpdir: &Path, + cnum: CrateNum, + bundled_lib_file_names: &FxHashSet<Symbol>, +) { + let src = &codegen_results.crate_info.used_crate_source[&cnum]; + let cratepath = &src.rlib.as_ref().unwrap().0; - let canonical = f.replace('-', "_"); - - let is_rust_object = - canonical.starts_with(&canonical_name) && looks_like_rust_object_file(&f); - - // If we've been requested to skip all native object files - // (those not generated by the rust compiler) then we can skip - // this file. See above for why we may want to do this. - let skip_because_cfg_say_so = skip_native && !is_rust_object; - - // If we're performing LTO and this is a rust-generated object - // file, then we don't need the object file as it's part of the - // LTO module. Note that `#![no_builtins]` is excluded from LTO, - // though, so we let that object file slide. - let skip_because_lto = - upstream_rust_objects_already_included && is_rust_object && is_builtins; - - // We skip native libraries because: - // 1. This native libraries won't be used from the generated rlib, - // so we can throw them away to avoid the copying work. - // 2. We can't allow it to be a single remaining entry in archive - // as some linkers may complain on that. - if bundled_lib_file_names.contains(&Symbol::intern(f)) { - return true; - } + let mut link_upstream = |path: &Path| { + cmd.link_rlib(&fix_windows_verbatim_for_gcc(path)); + }; - if skip_because_cfg_say_so || skip_because_lto { - return true; - } + // See the comment above in `link_staticlib` and `link_rlib` for why if + // there's a static library that's not relevant we skip all object + // files. + let native_libs = &codegen_results.crate_info.native_libraries[&cnum]; + let skip_native = native_libs.iter().any(|lib| { + matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. }) + && !relevant_lib(sess, lib) + }); - false - }), - ) { - sess.fatal(&format!("failed to build archive from rlib: {}", e)); - } - if archive.build(&dst) { - link_upstream(&dst); - } - }); + if (!are_upstream_rust_objects_already_included(sess) + || ignored_for_lto(sess, &codegen_results.crate_info, cnum)) + && !skip_native + { + link_upstream(cratepath); + return; } - // Same thing as above, but for dynamic crates instead of static crates. 
- fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) { - // Just need to tell the linker about where the library lives and - // what its name is - let parent = cratepath.parent(); - if let Some(dir) = parent { - cmd.include_path(&fix_windows_verbatim_for_gcc(dir)); - } - let filestem = cratepath.file_stem().unwrap().to_str().unwrap(); - cmd.link_rust_dylib( - &unlib(&sess.target, filestem), - parent.unwrap_or_else(|| Path::new("")), - ); - } -} + let dst = tmpdir.join(cratepath.file_name().unwrap()); + let name = cratepath.file_name().unwrap().to_str().unwrap(); + let name = &name[3..name.len() - 5]; // chop off lib/.rlib + let bundled_lib_file_names = bundled_lib_file_names.clone(); -/// Link in all of our upstream crates' native dependencies. Remember that all of these upstream -/// native dependencies are all non-static dependencies. We've got two cases then: -/// -/// 1. The upstream crate is an rlib. In this case we *must* link in the native dependency because -/// the rlib is just an archive. -/// -/// 2. The upstream crate is a dylib. In order to use the dylib, we have to have the dependency -/// present on the system somewhere. Thus, we don't gain a whole lot from not linking in the -/// dynamic dependency to this crate as well. -/// -/// The use case for this is a little subtle. In theory the native dependencies of a crate are -/// purely an implementation detail of the crate itself, but the problem arises with generic and -/// inlined functions. If a generic function calls a native function, then the generic function -/// must be instantiated in the target crate, meaning that the native symbol must also be resolved -/// in the target crate. -fn add_upstream_native_libraries( - cmd: &mut dyn Linker, - sess: &Session, - codegen_results: &CodegenResults, -) { - let mut last = (None, NativeLibKind::Unspecified, None); - for &cnum in &codegen_results.crate_info.used_crates { - for lib in codegen_results.crate_info.native_libraries[&cnum].iter() { - let Some(name) = lib.name else { - continue; - }; - let name = name.as_str(); - if !relevant_lib(sess, &lib) { - continue; - } + sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| { + let canonical_name = name.replace('-', "_"); + let upstream_rust_objects_already_included = + are_upstream_rust_objects_already_included(sess); + let is_builtins = + sess.target.no_builtins || !codegen_results.crate_info.is_no_builtins.contains(&cnum); - // Skip if this library is the same as the last. - last = if (lib.name, lib.kind, lib.verbatim) == last { - continue; - } else { - (lib.name, lib.kind, lib.verbatim) - }; + let mut archive = archive_builder_builder.new_archive_builder(sess); + if let Err(e) = archive.add_archive( + cratepath, + Box::new(move |f| { + if f == METADATA_FILENAME { + return true; + } - let verbatim = lib.verbatim.unwrap_or(false); - match lib.kind { - NativeLibKind::Dylib { as_needed } => { - cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true)) + let canonical = f.replace('-', "_"); + + let is_rust_object = + canonical.starts_with(&canonical_name) && looks_like_rust_object_file(&f); + + // If we've been requested to skip all native object files + // (those not generated by the rust compiler) then we can skip + // this file. See above for why we may want to do this. + let skip_because_cfg_say_so = skip_native && !is_rust_object; + + // If we're performing LTO and this is a rust-generated object + // file, then we don't need the object file as it's part of the + // LTO module. 
Note that `#![no_builtins]` is excluded from LTO, + // though, so we let that object file slide. + let skip_because_lto = + upstream_rust_objects_already_included && is_rust_object && is_builtins; + + // We skip native libraries because: + // 1. This native libraries won't be used from the generated rlib, + // so we can throw them away to avoid the copying work. + // 2. We can't allow it to be a single remaining entry in archive + // as some linkers may complain on that. + if bundled_lib_file_names.contains(&Symbol::intern(f)) { + return true; } - NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true), - NativeLibKind::Framework { as_needed } => { - cmd.link_framework(name, as_needed.unwrap_or(true)) + + if skip_because_cfg_say_so || skip_because_lto { + return true; } - // ignore static native libraries here as we've - // already included them in add_local_native_libraries and - // add_upstream_rust_crates - NativeLibKind::Static { .. } => {} - NativeLibKind::RawDylib | NativeLibKind::LinkArg => {} - } + + false + }), + ) { + sess.fatal(&format!("failed to build archive from rlib: {}", e)); } - } + if archive.build(&dst) { + link_upstream(&dst); + } + }); +} + +// Same thing as above, but for dynamic crates instead of static crates. +fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) { + // Just need to tell the linker about where the library lives and + // what its name is + let parent = cratepath.parent(); + if let Some(dir) = parent { + cmd.include_path(&fix_windows_verbatim_for_gcc(dir)); + } + let stem = cratepath.file_stem().unwrap().to_str().unwrap(); + // Convert library file-stem into a cc -l argument. + let prefix = if stem.starts_with("lib") && !sess.target.is_like_windows { 3 } else { 0 }; + cmd.link_rust_dylib(&stem[prefix..], parent.unwrap_or_else(|| Path::new(""))); } fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool { @@ -2813,14 +2725,14 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { ("arm", "watchos") => "watchos", (_, "macos") => "macosx", _ => { - sess.err(&format!("unsupported arch `{}` for os `{}`", arch, os)); + sess.emit_err(errors::UnsupportedArch { arch, os }); return; } }; let sdk_root = match get_apple_sdk_root(sdk_name) { Ok(s) => s, Err(e) => { - sess.err(&e); + sess.emit_err(e); return; } }; @@ -2836,7 +2748,7 @@ fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { } } -fn get_apple_sdk_root(sdk_name: &str) -> Result<String, String> { +fn get_apple_sdk_root(sdk_name: &str) -> Result<String, errors::AppleSdkRootError<'_>> { // Following what clang does // (https://github.com/llvm/llvm-project/blob/ // 296a80102a9b72c3eda80558fb78a3ed8849b341/clang/lib/Driver/ToolChains/Darwin.cpp#L1661-L1678) @@ -2886,7 +2798,7 @@ fn get_apple_sdk_root(sdk_name: &str) -> Result<String, String> { match res { Ok(output) => Ok(output.trim().to_string()), - Err(e) => Err(format!("failed to get {} SDK path: {}", sdk_name, e)), + Err(error) => Err(errors::AppleSdkRootError::SdkPath { sdk_name, error }), } } @@ -2919,7 +2831,7 @@ fn add_gcc_ld_path(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) { } } } else { - sess.fatal("option `-Z gcc-ld` is used even though linker flavor is not gcc"); + sess.emit_fatal(errors::OptionGccOnly); } } } diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index c49b19bdf..f087d903e 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ 
b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -34,9 +34,9 @@ pub fn disable_localization(linker: &mut Command) { linker.env("VSLANG", "1033"); } -// The third parameter is for env vars, used on windows to set up the -// path for MSVC to find its DLLs, and gcc to find its bundled -// toolchain +/// The third parameter is for env vars, used on windows to set up the +/// path for MSVC to find its DLLs, and gcc to find its bundled +/// toolchain pub fn get_linker<'a>( sess: &'a Session, linker: &Path, @@ -515,7 +515,7 @@ impl<'a> Linker for GccLinker<'a> { // -force_load is the macOS equivalent of --whole-archive, but it // involves passing the full path to the library to link. self.linker_arg("-force_load"); - let lib = find_native_static_library(lib, Some(verbatim), search_path, &self.sess); + let lib = find_native_static_library(lib, verbatim, search_path, &self.sess); self.linker_arg(&lib); } } @@ -1260,11 +1260,11 @@ impl<'a> Linker for WasmLd<'a> { } fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) { - self.cmd.arg("-l").arg(lib); + self.cmd.arg("--whole-archive").arg("-l").arg(lib).arg("--no-whole-archive"); } fn link_whole_rlib(&mut self, lib: &Path) { - self.cmd.arg(lib); + self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive"); } fn gc_sections(&mut self, _keep_metadata: bool) { diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs index 99ddd1764..51c5c375d 100644 --- a/compiler/rustc_codegen_ssa/src/back/metadata.rs +++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs @@ -60,7 +60,7 @@ impl MetadataLoader for DefaultMetadataLoader { let data = entry .data(data) .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?; - return search_for_metadata(path, data, ".rmeta"); + return search_for_section(path, data, ".rmeta"); } } @@ -69,11 +69,11 @@ impl MetadataLoader for DefaultMetadataLoader { } fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> { - load_metadata_with(path, |data| search_for_metadata(path, data, ".rustc")) + load_metadata_with(path, |data| search_for_section(path, data, ".rustc")) } } -fn search_for_metadata<'a>( +pub(super) fn search_for_section<'a>( path: &Path, bytes: &'a [u8], section: &str, @@ -191,39 +191,43 @@ pub enum MetadataPosition { Last, } -// For rlibs we "pack" rustc metadata into a dummy object file. -// -// Historically it was needed because rustc linked rlibs as whole-archive in some cases. -// In that case linkers try to include all files located in an archive, so if metadata is stored -// in an archive then it needs to be of a form that the linker is able to process. -// Now it's not clear whether metadata still needs to be wrapped into an object file or not. -// -// Note, though, that we don't actually want this metadata to show up in any -// final output of the compiler. Instead this is purely for rustc's own -// metadata tracking purposes. -// -// With the above in mind, each "flavor" of object format gets special -// handling here depending on the target: -// -// * MachO - macos-like targets will insert the metadata into a section that -// is sort of fake dwarf debug info. Inspecting the source of the macos -// linker this causes these sections to be skipped automatically because -// it's not in an allowlist of otherwise well known dwarf section names to -// go into the final artifact. -// -// * WebAssembly - we actually don't have any container format for this -// target. 
WebAssembly doesn't support the `dylib` crate type anyway so -// there's no need for us to support this at this time. Consequently the -// metadata bytes are simply stored as-is into an rlib. -// -// * COFF - Windows-like targets create an object with a section that has -// the `IMAGE_SCN_LNK_REMOVE` flag set which ensures that if the linker -// ever sees the section it doesn't process it and it's removed. -// -// * ELF - All other targets are similar to Windows in that there's a -// `SHF_EXCLUDE` flag we can set on sections in an object file to get -// automatically removed from the final output. -pub fn create_rmeta_file(sess: &Session, metadata: &[u8]) -> (Vec<u8>, MetadataPosition) { +/// For rlibs we "pack" rustc metadata into a dummy object file. +/// +/// Historically it was needed because rustc linked rlibs as whole-archive in some cases. +/// In that case linkers try to include all files located in an archive, so if metadata is stored +/// in an archive then it needs to be of a form that the linker is able to process. +/// Now it's not clear whether metadata still needs to be wrapped into an object file or not. +/// +/// Note, though, that we don't actually want this metadata to show up in any +/// final output of the compiler. Instead this is purely for rustc's own +/// metadata tracking purposes. +/// +/// With the above in mind, each "flavor" of object format gets special +/// handling here depending on the target: +/// +/// * MachO - macos-like targets will insert the metadata into a section that +/// is sort of fake dwarf debug info. Inspecting the source of the macos +/// linker this causes these sections to be skipped automatically because +/// it's not in an allowlist of otherwise well known dwarf section names to +/// go into the final artifact. +/// +/// * WebAssembly - we actually don't have any container format for this +/// target. WebAssembly doesn't support the `dylib` crate type anyway so +/// there's no need for us to support this at this time. Consequently the +/// metadata bytes are simply stored as-is into an rlib. +/// +/// * COFF - Windows-like targets create an object with a section that has +/// the `IMAGE_SCN_LNK_REMOVE` flag set which ensures that if the linker +/// ever sees the section it doesn't process it and it's removed. +/// +/// * ELF - All other targets are similar to Windows in that there's a +/// `SHF_EXCLUDE` flag we can set on sections in an object file to get +/// automatically removed from the final output. +pub fn create_wrapper_file( + sess: &Session, + section_name: Vec<u8>, + data: &[u8], +) -> (Vec<u8>, MetadataPosition) { let Some(mut file) = create_object_file(sess) else { // This is used to handle all "other" targets. This includes targets // in two categories: @@ -241,11 +245,11 @@ pub fn create_rmeta_file(sess: &Session, metadata: &[u8]) -> (Vec<u8>, MetadataP // WebAssembly and for targets not supported by the `object` crate // yet it means that work will need to be done in the `object` crate // to add a case above. 
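(Illustrative aside, not part of the commit.) The wrapper-object scheme described in the doc comment above can be sketched directly with the `object` crate. The snippet below is a minimal, hypothetical sketch that hard-codes an x86_64 ELF target and the `SHF_EXCLUDE` flag; the real `create_wrapper_file` derives the format, architecture, and section flags from the session's target instead.

use object::write::{Object, StandardSegment};
use object::{elf, Architecture, BinaryFormat, Endianness, SectionFlags, SectionKind};

// Pack `data` into a dummy object file, inside a section the linker will drop.
fn wrapper_object_sketch(section_name: Vec<u8>, data: &[u8]) -> Vec<u8> {
    let mut file = Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little);
    let section = file.add_section(
        file.segment_name(StandardSegment::Debug).to_vec(),
        section_name,
        SectionKind::Debug,
    );
    // SHF_EXCLUDE keeps the section out of the final linked artifact, which is
    // exactly what we want for rustc-internal metadata.
    file.section_mut(section).flags = SectionFlags::Elf { sh_flags: elf::SHF_EXCLUDE as u64 };
    file.append_section_data(section, data, 1);
    file.write().unwrap()
}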
- return (metadata.to_vec(), MetadataPosition::Last); + return (data.to_vec(), MetadataPosition::Last); }; let section = file.add_section( file.segment_name(StandardSegment::Debug).to_vec(), - b".rmeta".to_vec(), + section_name, SectionKind::Debug, ); match file.format() { @@ -259,7 +263,7 @@ pub fn create_rmeta_file(sess: &Session, metadata: &[u8]) -> (Vec<u8>, MetadataP } _ => {} }; - file.append_section_data(section, metadata, 1); + file.append_section_data(section, data, 1); (file.write().unwrap(), MetadataPosition::First) } diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs index c2ecc4160..22f534d90 100644 --- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs +++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs @@ -180,7 +180,8 @@ fn exported_symbols_provider_local<'tcx>( .collect(); if tcx.entry_fn(()).is_some() { - let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, "main")); + let exported_symbol = + ExportedSymbol::NoDefId(SymbolName::new(tcx, tcx.sess.target.entry_name.as_ref())); symbols.push(( exported_symbol, @@ -193,8 +194,11 @@ fn exported_symbols_provider_local<'tcx>( } if tcx.allocator_kind(()).is_some() { - for method in ALLOCATOR_METHODS { - let symbol_name = format!("__rust_{}", method.name); + for symbol_name in ALLOCATOR_METHODS + .iter() + .map(|method| format!("__rust_{}", method.name)) + .chain(["__rust_alloc_error_handler".to_string(), OomStrategy::SYMBOL.to_string()]) + { let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name)); symbols.push(( diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index d0ac016b0..12fca6496 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -15,10 +15,8 @@ use rustc_data_structures::profiling::TimingGuard; use rustc_data_structures::profiling::VerboseTimingGuard; use rustc_data_structures::sync::Lrc; use rustc_errors::emitter::Emitter; -use rustc_errors::{ - translation::{to_fluent_args, Translate}, - DiagnosticId, FatalError, Handler, Level, -}; +use rustc_errors::{translation::Translate, DiagnosticId, FatalError, Handler, Level}; +use rustc_errors::{DiagnosticMessage, Style}; use rustc_fs_util::link_or_copy; use rustc_hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc_incremental::{ @@ -38,6 +36,7 @@ use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span}; use rustc_target::spec::{MergeFunctions, SanitizerSet}; use std::any::Any; +use std::borrow::Cow; use std::fs; use std::io; use std::marker::PhantomData; @@ -341,20 +340,20 @@ pub struct CodegenContext<B: WriteBackendMethods> { pub split_debuginfo: rustc_target::spec::SplitDebuginfo, pub split_dwarf_kind: rustc_session::config::SplitDwarfKind, - // Number of cgus excluding the allocator/metadata modules + /// Number of cgus excluding the allocator/metadata modules pub total_cgus: usize, - // Handler to use for diagnostics produced during codegen. + /// Handler to use for diagnostics produced during codegen. pub diag_emitter: SharedEmitter, - // LLVM optimizations for which we want to print remarks. + /// LLVM optimizations for which we want to print remarks. 
pub remark: Passes, - // Worker thread number + /// Worker thread number pub worker: usize, - // The incremental compilation session directory, or None if we are not - // compiling incrementally + /// The incremental compilation session directory, or None if we are not + /// compiling incrementally pub incr_comp_session_dir: Option<PathBuf>, - // Used to update CGU re-use information during the thinlto phase. + /// Used to update CGU re-use information during the thinlto phase. pub cgu_reuse_tracker: CguReuseTracker, - // Channel back to the main control thread to send messages to + /// Channel back to the main control thread to send messages to pub coordinator_send: Sender<Box<dyn Any + Send>>, } @@ -757,7 +756,7 @@ fn execute_work_item<B: ExtraBackendMethods>( } } -// Actual LTO type we end up choosing based on multiple factors. +/// Actual LTO type we end up choosing based on multiple factors. pub enum ComputedLtoType { No, Thin, @@ -969,8 +968,11 @@ pub enum Message<B: WriteBackendMethods> { CodegenAborted, } +type DiagnosticArgName<'source> = Cow<'source, str>; + struct Diagnostic { - msg: String, + msg: Vec<(DiagnosticMessage, Style)>, + args: FxHashMap<DiagnosticArgName<'static>, rustc_errors::DiagnosticArgValue<'static>>, code: Option<DiagnosticId>, lvl: Level, } @@ -1743,15 +1745,18 @@ impl Translate for SharedEmitter { impl Emitter for SharedEmitter { fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) { - let fluent_args = to_fluent_args(diag.args()); + let args: FxHashMap<Cow<'_, str>, rustc_errors::DiagnosticArgValue<'_>> = + diag.args().map(|(name, arg)| (name.clone(), arg.clone())).collect(); drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: self.translate_messages(&diag.message, &fluent_args).to_string(), + msg: diag.message.clone(), + args: args.clone(), code: diag.code.clone(), lvl: diag.level(), }))); for child in &diag.children { drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: self.translate_messages(&child.message, &fluent_args).to_string(), + msg: child.message.clone(), + args: args.clone(), code: None, lvl: child.level, }))); @@ -1782,10 +1787,11 @@ impl SharedEmitterMain { match message { Ok(SharedEmitterMessage::Diagnostic(diag)) => { let handler = sess.diagnostic(); - let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg); + let mut d = rustc_errors::Diagnostic::new_with_messages(diag.lvl, diag.msg); if let Some(code) = diag.code { d.code(code); } + d.replace_args(diag.args); handler.emit_diagnostic(&mut d); } Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => { diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 84b89cd71..4f396e970 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -22,7 +22,6 @@ use rustc_data_structures::sync::ParallelIterator; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_hir::lang_items::LangItem; -use rustc_hir::weak_lang_items::WEAK_ITEMS_SYMBOLS; use rustc_index::vec::Idx; use rustc_metadata::EncodedMetadata; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; @@ -639,7 +638,14 @@ pub fn codegen_crate<B: ExtraBackendMethods>( let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string(); let module_llvm = tcx.sess.time("write_allocator_module", || { - backend.codegen_allocator(tcx, &llmod_id, kind, tcx.lang_items().oom().is_some()) + backend.codegen_allocator( + tcx, + 
&llmod_id, + kind, + // If allocator_kind is Some then alloc_error_handler_kind must + // also be Some. + tcx.alloc_error_handler_kind(()).unwrap(), + ) }); Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator }) @@ -827,20 +833,30 @@ impl CrateInfo { // // In order to get this left-to-right dependency ordering, we use the reverse // postorder of all crates putting the leaves at the right-most positions. - let used_crates = tcx + let mut compiler_builtins = None; + let mut used_crates: Vec<_> = tcx .postorder_cnums(()) .iter() .rev() .copied() - .filter(|&cnum| !tcx.dep_kind(cnum).macros_only()) + .filter(|&cnum| { + let link = !tcx.dep_kind(cnum).macros_only(); + if link && tcx.is_compiler_builtins(cnum) { + compiler_builtins = Some(cnum); + return false; + } + link + }) .collect(); + // `compiler_builtins` are always placed last to ensure that they're linked correctly. + used_crates.extend(compiler_builtins); let mut info = CrateInfo { target_cpu, exported_symbols, linked_symbols, local_crate_name, - compiler_builtins: None, + compiler_builtins, profiler_runtime: None, is_no_builtins: Default::default(), native_libraries: Default::default(), @@ -866,9 +882,6 @@ impl CrateInfo { let used_crate_source = tcx.used_crate_source(cnum); info.used_crate_source.insert(cnum, used_crate_source.clone()); - if tcx.is_compiler_builtins(cnum) { - info.compiler_builtins = Some(cnum); - } if tcx.is_profiler_runtime(cnum) { info.profiler_runtime = Some(cnum); } @@ -887,14 +900,14 @@ impl CrateInfo { // by the compiler, but that's ok because all this stuff is unstable anyway. let target = &tcx.sess.target; if !are_upstream_rust_objects_already_included(tcx.sess) { - let missing_weak_lang_items: FxHashSet<&Symbol> = info + let missing_weak_lang_items: FxHashSet<Symbol> = info .used_crates .iter() - .flat_map(|cnum| { - tcx.missing_lang_items(*cnum) - .iter() - .filter(|l| lang_items::required(tcx, **l)) - .filter_map(|item| WEAK_ITEMS_SYMBOLS.get(item)) + .flat_map(|&cnum| tcx.missing_lang_items(cnum)) + .filter(|l| l.is_weak()) + .filter_map(|&l| { + let name = l.link_name()?; + lang_items::required(tcx, l).then_some(name) }) .collect(); let prefix = if target.is_like_windows && target.arch == "x86" { "_" } else { "" }; diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index e05646e1e..b004fbf85 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -1,4 +1,4 @@ -// Type Names for Debug Info. +//! Type Names for Debug Info. // Notes on targeting MSVC: // In general, MSVC's debugger attempts to parse all arguments as C++ expressions, @@ -26,10 +26,10 @@ use std::fmt::Write; use crate::debuginfo::wants_c_like_enum_debuginfo; -// Compute the name of the type as it should be stored in debuginfo. Does not do -// any caching, i.e., calling the function twice with the same type will also do -// the work twice. The `qualified` parameter only affects the first level of the -// type name, further levels (i.e., type parameters) are always fully qualified. +/// Compute the name of the type as it should be stored in debuginfo. Does not do +/// any caching, i.e., calling the function twice with the same type will also do +/// the work twice. The `qualified` parameter only affects the first level of the +/// type name, further levels (i.e., type parameters) are always fully qualified. 
pub fn compute_debuginfo_type_name<'tcx>( tcx: TyCtxt<'tcx>, t: Ty<'tcx>, @@ -59,7 +59,13 @@ fn push_debuginfo_type_name<'tcx>( match *t.kind() { ty::Bool => output.push_str("bool"), ty::Char => output.push_str("char"), - ty::Str => output.push_str("str"), + ty::Str => { + if cpp_like_debuginfo { + output.push_str("str$") + } else { + output.push_str("str") + } + } ty::Never => { if cpp_like_debuginfo { output.push_str("never$"); @@ -152,25 +158,19 @@ fn push_debuginfo_type_name<'tcx>( } } ty::Ref(_, inner_type, mutbl) => { - // Slices and `&str` are treated like C++ pointers when computing debug - // info for MSVC debugger. However, wrapping these types' names in a synthetic type - // causes the .natvis engine for WinDbg to fail to display their data, so we opt these - // types out to aid debugging in MSVC. - let is_slice_or_str = matches!(*inner_type.kind(), ty::Slice(_) | ty::Str); - - if !cpp_like_debuginfo { - output.push('&'); - output.push_str(mutbl.prefix_str()); - } else if !is_slice_or_str { + if cpp_like_debuginfo { match mutbl { Mutability::Not => output.push_str("ref$<"), Mutability::Mut => output.push_str("ref_mut$<"), } + } else { + output.push('&'); + output.push_str(mutbl.prefix_str()); } push_debuginfo_type_name(tcx, inner_type, qualified, output, visited); - if cpp_like_debuginfo && !is_slice_or_str { + if cpp_like_debuginfo { push_close_angle_bracket(cpp_like_debuginfo, output); } } @@ -195,7 +195,7 @@ fn push_debuginfo_type_name<'tcx>( } ty::Slice(inner_type) => { if cpp_like_debuginfo { - output.push_str("slice$<"); + output.push_str("slice2$<"); } else { output.push('['); } diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index ebb531f1c..e3b6fbf1b 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -354,3 +354,197 @@ impl IntoDiagnostic<'_> for LinkingFailed<'_> { diag } } + +#[derive(Diagnostic)] +#[diag(codegen_ssa_link_exe_unexpected_error)] +pub struct LinkExeUnexpectedError; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_repair_vs_build_tools)] +pub struct RepairVSBuildTools; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_missing_cpp_build_tool_component)] +pub struct MissingCppBuildToolComponent; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_select_cpp_build_tool_workload)] +pub struct SelectCppBuildToolWorkload; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_visual_studio_not_installed)] +pub struct VisualStudioNotInstalled; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_linker_not_found)] +#[note] +pub struct LinkerNotFound { + pub linker_path: PathBuf, + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unable_to_exe_linker)] +#[note] +#[note(command_note)] +pub struct UnableToExeLinker { + pub linker_path: PathBuf, + pub error: Error, + pub command_formatted: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_msvc_missing_linker)] +pub struct MsvcMissingLinker; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_check_installed_visual_studio)] +pub struct CheckInstalledVisualStudio; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unsufficient_vs_code_product)] +pub struct UnsufficientVSCodeProduct; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_processing_dymutil_failed)] +#[note] +pub struct ProcessingDymutilFailed { + pub status: ExitStatus, + pub output: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unable_to_run_dsymutil)] +#[note] +pub struct UnableToRunDsymutil { + pub error: Error, +} + +#[derive(Diagnostic)] 
+#[diag(codegen_ssa_stripping_debu_info_failed)] +#[note] +pub struct StrippingDebugInfoFailed<'a> { + pub util: &'a str, + pub status: ExitStatus, + pub output: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unable_to_run)] +pub struct UnableToRun<'a> { + pub util: &'a str, + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_linker_file_stem)] +pub struct LinkerFileStem; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_static_library_native_artifacts)] +pub struct StaticLibraryNativeArtifacts; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_native_static_libs)] +pub struct NativeStaticLibs { + pub arguments: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_link_script_unavailable)] +pub struct LinkScriptUnavailable; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_link_script_write_failure)] +pub struct LinkScriptWriteFailure { + pub path: PathBuf, + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_failed_to_write)] +pub struct FailedToWrite { + pub path: PathBuf, + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unable_to_write_debugger_visualizer)] +pub struct UnableToWriteDebuggerVisualizer { + pub path: PathBuf, + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_rlib_archive_build_failure)] +pub struct RlibArchiveBuildFailure { + pub error: Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_option_gcc_only)] +pub struct OptionGccOnly; + +#[derive(Diagnostic)] +pub enum ExtractBundledLibsError<'a> { + #[diag(codegen_ssa_extract_bundled_libs_open_file)] + OpenFile { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_mmap_file)] + MmapFile { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_parse_archive)] + ParseArchive { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_read_entry)] + ReadEntry { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_archive_member)] + ArchiveMember { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_convert_name)] + ConvertName { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_write_file)] + WriteFile { rlib: &'a Path, error: Box<dyn std::error::Error> }, + + #[diag(codegen_ssa_extract_bundled_libs_write_file)] + ExtractSection { rlib: &'a Path, error: Box<dyn std::error::Error> }, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unsupported_arch)] +pub struct UnsupportedArch<'a> { + pub arch: &'a str, + pub os: &'a str, +} + +#[derive(Diagnostic)] +pub enum AppleSdkRootError<'a> { + #[diag(codegen_ssa_apple_sdk_error_sdk_path)] + SdkPath { sdk_name: &'a str, error: Error }, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_read_file)] +pub struct ReadFileError { + pub message: std::io::Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unsupported_link_self_contained)] +pub struct UnsupportedLinkSelfContained; + +#[derive(Diagnostic)] +#[diag(codegen_ssa_archive_build_failure)] +// Public for rustc_codegen_llvm::back::archive +pub struct ArchiveBuildFailure { + pub error: std::io::Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unknown_archive_kind)] +// Public for rustc_codegen_llvm::back::archive +pub struct UnknownArchiveKind<'a> { + pub kind: &'a str, +} diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs index e6f402ef1..6015d48de 100644 --- 
a/compiler/rustc_codegen_ssa/src/glue.rs +++ b/compiler/rustc_codegen_ssa/src/glue.rs @@ -15,7 +15,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( ) -> (Bx::Value, Bx::Value) { let layout = bx.layout_of(t); debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout); - if !layout.is_unsized() { + if layout.is_sized() { let size = bx.const_usize(layout.size.bytes()); let align = bx.const_usize(layout.align.abi.bytes()); return (size, align); diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index ceebe4d41..def6390f6 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -1,12 +1,13 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] -#![feature(box_patterns)] -#![feature(try_blocks)] -#![feature(once_cell)] #![feature(associated_type_bounds)] -#![feature(strict_provenance)] -#![feature(int_roundings)] +#![feature(box_patterns)] #![feature(if_let_guard)] +#![feature(int_roundings)] +#![feature(let_chains)] #![feature(never_type)] +#![feature(once_cell)] +#![feature(strict_provenance)] +#![feature(try_blocks)] #![recursion_limit = "256"] #![allow(rustc::potential_query_instability)] @@ -115,7 +116,7 @@ pub struct NativeLib { pub name: Option<Symbol>, pub filename: Option<Symbol>, pub cfg: Option<ast::MetaItem>, - pub verbatim: Option<bool>, + pub verbatim: bool, pub dll_imports: Vec<cstore::DllImport>, } @@ -126,7 +127,7 @@ impl From<&cstore::NativeLib> for NativeLib { filename: lib.filename, name: lib.name, cfg: lib.cfg.clone(), - verbatim: lib.verbatim, + verbatim: lib.verbatim.unwrap_or(false), dll_imports: lib.dll_imports.clone(), } } diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 29b7c9b0a..03d833fbb 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1,7 +1,7 @@ use super::operand::OperandRef; use super::operand::OperandValue::{Immediate, Pair, Ref}; use super::place::PlaceRef; -use super::{FunctionCx, LocalRef}; +use super::{CachedLlbb, FunctionCx, LocalRef}; use crate::base; use crate::common::{self, IntPredicate}; @@ -17,6 +17,7 @@ use rustc_middle::mir::{self, AssertKind, SwitchTargets}; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, Instance, Ty, TypeVisitable}; +use rustc_session::config::OptLevel; use rustc_span::source_map::Span; use rustc_span::{sym, Symbol}; use rustc_symbol_mangling::typeid::typeid_for_fnabi; @@ -24,6 +25,15 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg}; use rustc_target::abi::{self, HasDataLayout, WrappingRange}; use rustc_target::spec::abi::Abi; +// Indicates if we are in the middle of merging a BB's successor into it. This +// can happen when BB jumps directly to its successor and the successor has no +// other predecessors. +#[derive(Debug, PartialEq)] +enum MergingSucc { + False, + True, +} + /// Used by `FunctionCx::codegen_terminator` for emitting common patterns /// e.g., creating a basic block, calling a function, etc. struct TerminatorCodegenHelper<'tcx> { @@ -63,31 +73,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { } } - /// Get a basic block (creating it if necessary), possibly with a landing - /// pad next to it. 
- fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>( - &self, - fx: &mut FunctionCx<'a, 'tcx, Bx>, - target: mir::BasicBlock, - ) -> (Bx::BasicBlock, bool) { - let span = self.terminator.source_info.span; - let lltarget = fx.llbb(target); - let target_funclet = fx.cleanup_kinds[target].funclet_bb(target); - match (self.funclet_bb, target_funclet) { - (None, None) => (lltarget, false), - // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC - (None, Some(_)) => (fx.landing_pad_for(target), false), - (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator), - (Some(f), Some(t_f)) => { - if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) { - (lltarget, false) - } else { - (fx.landing_pad_for(target), true) - } - } - } - } - /// Get a basic block (creating it if necessary), possibly with cleanup /// stuff in it or next to it. fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>( @@ -95,7 +80,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { fx: &mut FunctionCx<'a, 'tcx, Bx>, target: mir::BasicBlock, ) -> Bx::BasicBlock { - let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target); + let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target); + let mut lltarget = fx.llbb(target); + if needs_landing_pad { + lltarget = fx.landing_pad_for(target); + } if is_cleanupret { // MSVC cross-funclet jump - need a trampoline debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess)); @@ -110,20 +99,54 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { } } + fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>( + &self, + fx: &mut FunctionCx<'a, 'tcx, Bx>, + target: mir::BasicBlock, + ) -> (bool, bool) { + let target_funclet = fx.cleanup_kinds[target].funclet_bb(target); + let (needs_landing_pad, is_cleanupret) = match (self.funclet_bb, target_funclet) { + (None, None) => (false, false), + (None, Some(_)) => (true, false), + (Some(_), None) => { + let span = self.terminator.source_info.span; + span_bug!(span, "{:?} - jump out of cleanup?", self.terminator); + } + (Some(f), Some(t_f)) => { + if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) { + (false, false) + } else { + (true, true) + } + } + }; + (needs_landing_pad, is_cleanupret) + } + fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>( &self, fx: &mut FunctionCx<'a, 'tcx, Bx>, bx: &mut Bx, target: mir::BasicBlock, - ) { - let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target); - if is_cleanupret { - // MSVC micro-optimization: generate a `ret` rather than a jump - // to a trampoline. - debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess)); - bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); + mergeable_succ: bool, + ) -> MergingSucc { + let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target); + if mergeable_succ && !needs_landing_pad && !is_cleanupret { + // We can merge the successor into this bb, so no need for a `br`. + MergingSucc::True } else { - bx.br(lltarget); + let mut lltarget = fx.llbb(target); + if needs_landing_pad { + lltarget = fx.landing_pad_for(target); + } + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. 
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + MergingSucc::False } } @@ -139,7 +162,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, cleanup: Option<mir::BasicBlock>, copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>], - ) { + mergeable_succ: bool, + ) -> MergingSucc { // If there is a cleanup block and the function we're calling can unwind, then // do an invoke, otherwise do a call. let fn_ty = bx.fn_decl_backend_type(&fn_abi); @@ -190,6 +214,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { } fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret); } + MergingSucc::False } else { let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx)); if fx.mir[self.bb].is_cleanup { @@ -205,9 +230,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { bx.lifetime_end(tmp.llval, tmp.layout.size); } fx.store_return(bx, ret_dest, &fn_abi.ret, llret); - self.funclet_br(fx, bx, target); + self.funclet_br(fx, bx, target, mergeable_succ) } else { bx.unreachable(); + MergingSucc::False } } } @@ -224,7 +250,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { destination: Option<mir::BasicBlock>, cleanup: Option<mir::BasicBlock>, instance: Instance<'_>, - ) { + mergeable_succ: bool, + ) -> MergingSucc { if let Some(cleanup) = cleanup { let ret_llbb = if let Some(target) = destination { fx.llbb(target) @@ -240,13 +267,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { instance, Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))), ); + MergingSucc::False } else { bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None); if let Some(target) = destination { - self.funclet_br(fx, bx, target); + self.funclet_br(fx, bx, target, mergeable_succ) } else { bx.unreachable(); + MergingSucc::False } } } @@ -255,16 +284,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { /// Codegen implementations for some terminator variants. impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// Generates code for a `Resume` terminator. - fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) { + fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) { if let Some(funclet) = helper.funclet(self) { bx.cleanup_ret(funclet, None); } else { - let slot = self.get_personality_slot(&mut bx); - let lp0 = slot.project_field(&mut bx, 0); + let slot = self.get_personality_slot(bx); + let lp0 = slot.project_field(bx, 0); let lp0 = bx.load_operand(lp0).immediate(); - let lp1 = slot.project_field(&mut bx, 1); + let lp1 = slot.project_field(bx, 1); let lp1 = bx.load_operand(lp1).immediate(); - slot.storage_dead(&mut bx); + slot.storage_dead(bx); let mut lp = bx.const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); @@ -276,22 +305,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn codegen_switchint_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, discr: &mir::Operand<'tcx>, switch_ty: Ty<'tcx>, targets: &SwitchTargets, ) { - let discr = self.codegen_operand(&mut bx, &discr); + let discr = self.codegen_operand(bx, &discr); // `switch_ty` is redundant, sanity-check that. 
assert_eq!(discr.layout.ty, switch_ty); let mut target_iter = targets.iter(); if target_iter.len() == 1 { - // If there are two targets (one conditional, one fallback), emit br instead of switch + // If there are two targets (one conditional, one fallback), emit `br` instead of + // `switch`. let (test_value, target) = target_iter.next().unwrap(); let lltrue = helper.llbb_with_cleanup(self, target); let llfalse = helper.llbb_with_cleanup(self, targets.otherwise()); if switch_ty == bx.tcx().types.bool { - // Don't generate trivial icmps when switching on bool + // Don't generate trivial icmps when switching on bool. match test_value { 0 => bx.cond_br(discr.immediate(), llfalse, lltrue), 1 => bx.cond_br(discr.immediate(), lltrue, llfalse), @@ -303,6 +333,30 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } + } else if self.cx.sess().opts.optimize == OptLevel::No + && target_iter.len() == 2 + && self.mir[targets.otherwise()].is_empty_unreachable() + { + // In unoptimized builds, if there are two normal targets and the `otherwise` target is + // an unreachable BB, emit `br` instead of `switch`. This leaves behind the unreachable + // BB, which will usually (but not always) be dead code. + // + // Why only in unoptimized builds? + // - In unoptimized builds LLVM uses FastISel which does not support switches, so it + // must fall back to the to the slower SelectionDAG isel. Therefore, using `br` gives + // significant compile time speedups for unoptimized builds. + // - In optimized builds the above doesn't hold, and using `br` sometimes results in + // worse generated code because LLVM can no longer tell that the value being switched + // on can only have two values, e.g. 0 and 1. + // + let (test_value1, target1) = target_iter.next().unwrap(); + let (_test_value2, target2) = target_iter.next().unwrap(); + let ll1 = helper.llbb_with_cleanup(self, target1); + let ll2 = helper.llbb_with_cleanup(self, target2); + let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty)); + let llval = bx.const_uint_big(switch_llty, test_value1); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); + bx.cond_br(cmp, ll1, ll2); } else { bx.switch( discr.immediate(), @@ -312,7 +366,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - fn codegen_return_terminator(&mut self, mut bx: Bx) { + fn codegen_return_terminator(&mut self, bx: &mut Bx) { // Call `va_end` if this is the definition of a C-variadic function. if self.fn_abi.c_variadic { // The `VaList` "spoofed" argument is just after all the real arguments. @@ -342,11 +396,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } PassMode::Direct(_) | PassMode::Pair(..) => { - let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref()); + let op = self.codegen_consume(bx, mir::Place::return_place().as_ref()); if let Ref(llval, _, align) = op.val { bx.load(bx.backend_type(op.layout), llval, align) } else { - op.immediate_or_packed_pair(&mut bx) + op.immediate_or_packed_pair(bx) } } @@ -362,8 +416,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let llslot = match op.val { Immediate(_) | Pair(..) 
=> { - let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout); - op.val.store(&mut bx, scratch); + let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout); + op.val.store(bx, scratch); scratch.llval } Ref(llval, _, align) => { @@ -383,22 +437,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn codegen_drop_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, location: mir::Place<'tcx>, target: mir::BasicBlock, unwind: Option<mir::BasicBlock>, - ) { + mergeable_succ: bool, + ) -> MergingSucc { let ty = location.ty(self.mir, bx.tcx()).ty; let ty = self.monomorphize(ty); let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target, mergeable_succ); } - let place = self.codegen_place(&mut bx, location.as_ref()); + let place = self.codegen_place(bx, location.as_ref()); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; @@ -436,7 +490,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { args = &args[..1]; ( meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE) - .get_fn(&mut bx, vtable, ty, &fn_abi), + .get_fn(bx, vtable, ty, &fn_abi), fn_abi, ) } @@ -481,7 +535,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { debug!("args' = {:?}", args); ( meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE) - .get_fn(&mut bx, vtable, ty, &fn_abi), + .get_fn(bx, vtable, ty, &fn_abi), fn_abi, ) } @@ -489,29 +543,31 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; helper.do_call( self, - &mut bx, + bx, fn_abi, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind, &[], - ); + mergeable_succ, + ) } fn codegen_assert_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, terminator: &mir::Terminator<'tcx>, cond: &mir::Operand<'tcx>, expected: bool, msg: &mir::AssertMessage<'tcx>, target: mir::BasicBlock, cleanup: Option<mir::BasicBlock>, - ) { + mergeable_succ: bool, + ) -> MergingSucc { let span = terminator.source_info.span; - let cond = self.codegen_operand(&mut bx, cond).immediate(); + let cond = self.codegen_operand(bx, cond).immediate(); let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked @@ -529,8 +585,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target, mergeable_succ); } // Pass the condition through llvm.expect for branch hinting. @@ -547,16 +602,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // After this point, bx is the block for the call to panic. bx.switch_to_block(panic_block); - self.set_debug_loc(&mut bx, terminator.source_info); + self.set_debug_loc(bx, terminator.source_info); // Get the location information. - let location = self.get_caller_location(&mut bx, terminator.source_info).immediate(); + let location = self.get_caller_location(bx, terminator.source_info).immediate(); // Put together the arguments to the panic entry point. 
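(Hypothetical example, not part of the commit.) Source like the function below is what produces a MIR `Assert` terminator with `AssertKind::BoundsCheck`; the match that follows assembles its `(index, len)` arguments for the `panic_bounds_check` lang item, with `#[track_caller]` supplying the caller location as an implicit extra argument.

// Indexing a slice inserts a bounds check; on failure the generated code
// calls the panic_bounds_check entry point with the index and the length.
fn element(xs: &[u32], i: usize) -> u32 {
    xs[i]
}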
let (lang_item, args) = match msg { AssertKind::BoundsCheck { ref len, ref index } => { - let len = self.codegen_operand(&mut bx, len).immediate(); - let index = self.codegen_operand(&mut bx, index).immediate(); + let len = self.codegen_operand(bx, len).immediate(); + let index = self.codegen_operand(bx, index).immediate(); // It's `fn panic_bounds_check(index: usize, len: usize)`, // and `#[track_caller]` adds an implicit third argument. (LangItem::PanicBoundsCheck, vec![index, len, location]) @@ -569,29 +624,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } }; - let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item); + let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item); // Codegen the actual panic invoke/call. - helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]); + let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[], false); + assert_eq!(merging_succ, MergingSucc::False); + MergingSucc::False } fn codegen_abort_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, terminator: &mir::Terminator<'tcx>, ) { let span = terminator.source_info.span; - self.set_debug_loc(&mut bx, terminator.source_info); + self.set_debug_loc(bx, terminator.source_info); // Obtain the panic entry point. - let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind); + let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicNoUnwind); // Codegen the actual panic invoke/call. - helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]); + let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[], false); + assert_eq!(merging_succ, MergingSucc::False); } - /// Returns `true` if this is indeed a panic intrinsic and codegen is done. + /// Returns `Some` if this is indeed a panic intrinsic and codegen is done. fn codegen_panic_intrinsic( &mut self, helper: &TerminatorCodegenHelper<'tcx>, @@ -601,7 +659,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { source_info: mir::SourceInfo, target: Option<mir::BasicBlock>, cleanup: Option<mir::BasicBlock>, - ) -> bool { + mergeable_succ: bool, + ) -> Option<MergingSucc> { // Emit a panic or a no-op for `assert_*` intrinsics. // These are intrinsics that compile to panics so that we can get a message // which mentions the offending type, even from a const context. 
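(Hypothetical example, not part of the commit.) The `assert_*` intrinsics mentioned above are what `mem::zeroed` and `mem::uninitialized` expand to, so ordinary code such as the following reaches `codegen_panic_intrinsic` and panics at runtime with a message naming the offending type.

use std::mem;

// `fn()` has no valid all-zeroes bit pattern, so the assert_zero_valid
// intrinsic inside `mem::zeroed` is codegenned as a panic naming `fn()`.
#[allow(invalid_value)]
fn zeroed_fn_ptr() -> fn() {
    unsafe { mem::zeroed() }
}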
@@ -627,7 +686,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ZeroValid => !bx.tcx().permits_zero_init(layout), UninitValid => !bx.tcx().permits_uninit_init(layout), }; - if do_panic { + Some(if do_panic { let msg_str = with_no_visible_paths!({ with_no_trimmed_paths!({ if layout.abi.is_uninhabited() { @@ -660,22 +719,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)), cleanup, &[], - ); + mergeable_succ, + ) } else { // a NOP let target = target.unwrap(); - helper.funclet_br(self, bx, target) - } - true + helper.funclet_br(self, bx, target, mergeable_succ) + }) } else { - false + None } } fn codegen_call_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, terminator: &mir::Terminator<'tcx>, func: &mir::Operand<'tcx>, args: &[mir::Operand<'tcx>], @@ -683,12 +742,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { target: Option<mir::BasicBlock>, cleanup: Option<mir::BasicBlock>, fn_span: Span, - ) { + mergeable_succ: bool, + ) -> MergingSucc { let source_info = terminator.source_info; let span = source_info.span; // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. - let callee = self.codegen_operand(&mut bx, func); + let callee = self.codegen_operand(bx, func); let (instance, mut llfn) = match *callee.layout.ty.kind() { ty::FnDef(def_id, substs) => ( @@ -708,8 +768,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if let Some(ty::InstanceDef::DropGlue(_, None)) = def { // Empty drop glue; a no-op. let target = target.unwrap(); - helper.funclet_br(self, &mut bx, target); - return; + return helper.funclet_br(self, bx, target, mergeable_succ); } // FIXME(eddyb) avoid computing this if possible, when `instance` is @@ -736,9 +795,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; if intrinsic == Some(sym::transmute) { - if let Some(target) = target { - self.codegen_transmute(&mut bx, &args[0], destination); - helper.funclet_br(self, &mut bx, target); + return if let Some(target) = target { + self.codegen_transmute(bx, &args[0], destination); + helper.funclet_br(self, bx, target, mergeable_succ) } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -748,20 +807,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // it must be unreachable. assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited); bx.unreachable(); - } - return; + MergingSucc::False + }; } - if self.codegen_panic_intrinsic( + if let Some(merging_succ) = self.codegen_panic_intrinsic( &helper, - &mut bx, + bx, intrinsic, instance, source_info, target, cleanup, + mergeable_succ, ) { - return; + return merging_succ; } // The arguments we'll be passing. Plus one to account for outptr, if used. 
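(Hypothetical example, not part of the commit.) The `transmute` special case earlier in this hunk applies to ordinary code like the snippet below: with a live destination block, `codegen_transmute` writes the value and a plain branch is emitted, so no call is generated for the intrinsic.

// Hits the `intrinsic == Some(sym::transmute)` branch during codegen.
fn bits(x: f32) -> u32 {
    unsafe { std::mem::transmute::<f32, u32>(x) }
}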
@@ -771,23 +831,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Prepare the return value destination let ret_dest = if target.is_some() { let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic) + self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing }; if intrinsic == Some(sym::caller_location) { - if let Some(target) = target { - let location = self - .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info }); + return if let Some(target) = target { + let location = + self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info }); if let ReturnDest::IndirectOperand(tmp, _) = ret_dest { - location.val.store(&mut bx, tmp); + location.val.store(bx, tmp); } - self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate()); - helper.funclet_br(self, &mut bx, target); - } - return; + self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate()); + helper.funclet_br(self, bx, target, mergeable_succ) + } else { + MergingSucc::False + }; } match intrinsic { @@ -831,12 +892,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - self.codegen_operand(&mut bx, arg) + self.codegen_operand(bx, arg) }) .collect(); Self::codegen_intrinsic_call( - &mut bx, + bx, *instance.as_ref().unwrap(), &fn_abi, &args, @@ -845,16 +906,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval); + self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval); } - if let Some(target) = target { - helper.funclet_br(self, &mut bx, target); + return if let Some(target) = target { + helper.funclet_br(self, bx, target, mergeable_succ) } else { bx.unreachable(); - } - - return; + MergingSucc::False + }; } } @@ -868,7 +928,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let mut copied_constant_arguments = vec![]; 'make_args: for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&mut bx, arg); + let mut op = self.codegen_operand(bx, arg); if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { match op.val { @@ -878,12 +938,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // that is understood elsewhere in the compiler as a method on // `dyn Trait`. // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until - // we get a value of a built-in pointer type + // we get a value of a built-in pointer type. + // + // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`. 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_region_ptr() { for i in 0..op.layout.fields.count() { - let field = op.extract_field(&mut bx, i); + let field = op.extract_field(bx, i); if !field.layout.is_zst() { // we found the one non-zero-sized field that is allowed // now find *its* non-zero-sized field, or stop if it's a @@ -900,7 +962,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // data pointer and vtable. 
Look up the method in the vtable, and pass // the data pointer as the first argument llfn = Some(meth::VirtualIndex::from_index(idx).get_fn( - &mut bx, + bx, meta, op.layout.ty, &fn_abi, @@ -911,7 +973,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Ref(data_ptr, Some(meta), _) => { // by-value dynamic dispatch llfn = Some(meth::VirtualIndex::from_index(idx).get_fn( - &mut bx, + bx, meta, op.layout.ty, &fn_abi, @@ -920,19 +982,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { continue; } Immediate(_) => { - let ty::Ref(_, ty, _) = op.layout.ty.kind() else { - span_bug!(span, "can't codegen a virtual call on {:#?}", op); - }; - if !ty.is_dyn_star() { + // See comment above explaining why we peel these newtypes + 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() + && !op.layout.ty.is_region_ptr() + { + for i in 0..op.layout.fields.count() { + let field = op.extract_field(bx, i); + if !field.layout.is_zst() { + // we found the one non-zero-sized field that is allowed + // now find *its* non-zero-sized field, or stop if it's a + // pointer + op = field; + continue 'descend_newtypes; + } + } + + span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); + } + + // Make sure that we've actually unwrapped the rcvr down + // to a pointer or ref to `dyn* Trait`. + if !op.layout.ty.builtin_deref(true).unwrap().ty.is_dyn_star() { span_bug!(span, "can't codegen a virtual call on {:#?}", op); } - // FIXME(dyn-star): Make sure this is done on a &dyn* receiver let place = op.deref(bx.cx()); - let data_ptr = place.project_field(&mut bx, 0); - let meta_ptr = place.project_field(&mut bx, 1); + let data_ptr = place.project_field(bx, 0); + let meta_ptr = place.project_field(bx, 1); let meta = bx.load_operand(meta_ptr); llfn = Some(meth::VirtualIndex::from_index(idx).get_fn( - &mut bx, + bx, meta.immediate(), op.layout.ty, &fn_abi, @@ -951,24 +1029,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&mut bx, op.layout); + let tmp = PlaceRef::alloca(bx, op.layout); bx.lifetime_start(tmp.llval, tmp.layout.size); - op.val.store(&mut bx, tmp); + op.val.store(bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); copied_constant_arguments.push(tmp); } _ => {} } - self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]); + self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]); } let num_untupled = untuple.map(|tup| { - self.codegen_arguments_untupled( - &mut bx, - tup, - &mut llargs, - &fn_abi.args[first_args.len()..], - ) + self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..]) }); let needs_location = @@ -988,14 +1061,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn_abi, ); let location = - self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info }); + self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info }); debug!( "codegen_call_terminator({:?}): location={:?} (fn_span {:?})", terminator, location, fn_span ); let last_arg = fn_abi.args.last().unwrap(); - self.codegen_argument(&mut bx, location, &mut llargs, last_arg); + self.codegen_argument(bx, location, &mut llargs, last_arg); } let (is_indirect_call, fn_ptr) = match (llfn, instance) { @@ -1020,40 +1093,43 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.cond_br(cond, 
bb_pass, bb_fail); bx.switch_to_block(bb_pass); - helper.do_call( + let merging_succ = helper.do_call( self, - &mut bx, + bx, fn_abi, fn_ptr, &llargs, target.as_ref().map(|&target| (ret_dest, target)), cleanup, &copied_constant_arguments, + false, ); + assert_eq!(merging_succ, MergingSucc::False); bx.switch_to_block(bb_fail); bx.abort(); bx.unreachable(); - return; + return MergingSucc::False; } helper.do_call( self, - &mut bx, + bx, fn_abi, fn_ptr, &llargs, target.as_ref().map(|&target| (ret_dest, target)), cleanup, &copied_constant_arguments, - ); + mergeable_succ, + ) } fn codegen_asm_terminator( &mut self, helper: TerminatorCodegenHelper<'tcx>, - mut bx: Bx, + bx: &mut Bx, terminator: &mir::Terminator<'tcx>, template: &[ast::InlineAsmTemplatePiece], operands: &[mir::InlineAsmOperand<'tcx>], @@ -1062,24 +1138,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { destination: Option<mir::BasicBlock>, cleanup: Option<mir::BasicBlock>, instance: Instance<'_>, - ) { + mergeable_succ: bool, + ) -> MergingSucc { let span = terminator.source_info.span; let operands: Vec<_> = operands .iter() .map(|op| match *op { mir::InlineAsmOperand::In { reg, ref value } => { - let value = self.codegen_operand(&mut bx, value); + let value = self.codegen_operand(bx, value); InlineAsmOperandRef::In { reg, value } } mir::InlineAsmOperand::Out { reg, late, ref place } => { - let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref())); + let place = place.map(|place| self.codegen_place(bx, place.as_ref())); InlineAsmOperandRef::Out { reg, late, place } } mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => { - let in_value = self.codegen_operand(&mut bx, in_value); + let in_value = self.codegen_operand(bx, in_value); let out_place = - out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref())); + out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref())); InlineAsmOperandRef::InOut { reg, late, in_value, out_place } } mir::InlineAsmOperand::Const { ref value } => { @@ -1117,7 +1194,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { helper.do_inlineasm( self, - &mut bx, + bx, template, &operands, options, @@ -1125,71 +1202,128 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { destination, cleanup, instance, - ); + mergeable_succ, + ) } } impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - pub fn codegen_block(&mut self, bb: mir::BasicBlock) { - let llbb = self.llbb(bb); - let mut bx = Bx::build(self.cx, llbb); + pub fn codegen_block(&mut self, mut bb: mir::BasicBlock) { + let llbb = match self.try_llbb(bb) { + Some(llbb) => llbb, + None => return, + }; + let bx = &mut Bx::build(self.cx, llbb); let mir = self.mir; - let data = &mir[bb]; - debug!("codegen_block({:?}={:?})", bb, data); + // MIR basic blocks stop at any function call. This may not be the case + // for the backend's basic blocks, in which case we might be able to + // combine multiple MIR basic blocks into a single backend basic block. 
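(Hypothetical example, not part of the commit.) The merging performed by the loop that follows applies to straight-line code such as the function below: each call ends a MIR basic block, but because every block's single successor has no other predecessor, an unoptimized build can emit the whole body as one backend basic block.

// MIR shape: bb0 ends with `call step(a) -> bb1`, bb1 ends with
// `call step(b) -> bb2`, and bb2 returns. All three can share one LLVM block.
fn two_steps(a: u32) -> u32 {
    let b = step(a);
    step(b)
}

fn step(x: u32) -> u32 {
    x.wrapping_add(1)
}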
+ loop { + let data = &mir[bb]; - for statement in &data.statements { - bx = self.codegen_statement(bx, statement); - } + debug!("codegen_block({:?}={:?})", bb, data); + + for statement in &data.statements { + self.codegen_statement(bx, statement); + } + + let merging_succ = self.codegen_terminator(bx, bb, data.terminator()); + if let MergingSucc::False = merging_succ { + break; + } - self.codegen_terminator(bx, bb, data.terminator()); + // We are merging the successor into the produced backend basic + // block. Record that the successor should be skipped when it is + // reached. + // + // Note: we must not have already generated code for the successor. + // This is implicitly ensured by the reverse postorder traversal, + // and the assertion explicitly guarantees that. + let mut successors = data.terminator().successors(); + let succ = successors.next().unwrap(); + assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None)); + self.cached_llbbs[succ] = CachedLlbb::Skip; + bb = succ; + } } fn codegen_terminator( &mut self, - mut bx: Bx, + bx: &mut Bx, bb: mir::BasicBlock, terminator: &'tcx mir::Terminator<'tcx>, - ) { + ) -> MergingSucc { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb }; - self.set_debug_loc(&mut bx, terminator.source_info); + let mergeable_succ = || { + // Note: any call to `switch_to_block` will invalidate a `true` value + // of `mergeable_succ`. + let mut successors = terminator.successors(); + if let Some(succ) = successors.next() + && successors.next().is_none() + && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice() + { + // bb has a single successor, and bb is its only predecessor. This + // makes it a candidate for merging. + assert_eq!(succ_pred, bb); + true + } else { + false + } + }; + + self.set_debug_loc(bx, terminator.source_info); match terminator.kind { - mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx), + mir::TerminatorKind::Resume => { + self.codegen_resume_terminator(helper, bx); + MergingSucc::False + } mir::TerminatorKind::Abort => { self.codegen_abort_terminator(helper, bx, terminator); + MergingSucc::False } mir::TerminatorKind::Goto { target } => { - helper.funclet_br(self, &mut bx, target); + helper.funclet_br(self, bx, target, mergeable_succ()) } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => { self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets); + MergingSucc::False } mir::TerminatorKind::Return => { self.codegen_return_terminator(bx); + MergingSucc::False } mir::TerminatorKind::Unreachable => { bx.unreachable(); + MergingSucc::False } mir::TerminatorKind::Drop { place, target, unwind } => { - self.codegen_drop_terminator(helper, bx, place, target, unwind); + self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ()) } - mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - self.codegen_assert_terminator( - helper, bx, terminator, cond, expected, msg, target, cleanup, - ); - } + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self + .codegen_assert_terminator( + helper, + bx, + terminator, + cond, + expected, + msg, + target, + cleanup, + mergeable_succ(), + ), mir::TerminatorKind::DropAndReplace { .. 
} => { bug!("undesugared DropAndReplace in codegen: {:?}", terminator); @@ -1203,19 +1337,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cleanup, from_hir_call: _, fn_span, - } => { - self.codegen_call_terminator( - helper, - bx, - terminator, - func, - args, - destination, - target, - cleanup, - fn_span, - ); - } + } => self.codegen_call_terminator( + helper, + bx, + terminator, + func, + args, + destination, + target, + cleanup, + fn_span, + mergeable_succ(), + ), mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => { bug!("generator ops in codegen") } @@ -1230,20 +1363,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { line_spans, destination, cleanup, - } => { - self.codegen_asm_terminator( - helper, - bx, - terminator, - template, - operands, - options, - line_spans, - destination, - cleanup, - self.instance, - ); - } + } => self.codegen_asm_terminator( + helper, + bx, + terminator, + template, + operands, + options, + line_spans, + destination, + cleanup, + self.instance, + mergeable_succ(), + ), } } @@ -1561,12 +1693,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`). pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock { - self.cached_llbbs[bb].unwrap_or_else(|| { - // FIXME(eddyb) only name the block if `fewer_names` is `false`. - let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb)); - self.cached_llbbs[bb] = Some(llbb); - llbb - }) + self.try_llbb(bb).unwrap() + } + + /// Like `llbb`, but may fail if the basic block should be skipped. + pub fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> { + match self.cached_llbbs[bb] { + CachedLlbb::None => { + // FIXME(eddyb) only name the block if `fewer_names` is `false`. 
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb)); + self.cached_llbbs[bb] = CachedLlbb::Some(llbb); + Some(llbb) + } + CachedLlbb::Some(llbb) => Some(llbb), + CachedLlbb::Skip => None, + } } fn make_return_dest( diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs index 4c6ab457c..53ff3c240 100644 --- a/compiler/rustc_codegen_ssa/src/mir/constant.rs +++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs @@ -42,7 +42,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| { - self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered"); + match err { + ErrorHandled::Reported(_) => { + self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered"); + } + ErrorHandled::TooGeneric => { + span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err); + } + } err }) } diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 157c1c823..99283d3bb 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -14,6 +14,8 @@ use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; use super::{FunctionCx, LocalRef}; +use std::ops::Range; + pub struct FunctionDebugContext<S, L> { pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>, } @@ -25,7 +27,7 @@ pub enum VariableKind { } /// Like `mir::VarDebugInfo`, but within a `mir::Local`. -#[derive(Copy, Clone)] +#[derive(Clone)] pub struct PerLocalVarDebugInfo<'tcx, D> { pub name: Symbol, pub source_info: mir::SourceInfo, @@ -33,6 +35,10 @@ pub struct PerLocalVarDebugInfo<'tcx, D> { /// `DIVariable` returned by `create_dbg_var`. pub dbg_var: Option<D>, + /// Byte range in the `dbg_var` covered by this fragment, + /// if this is a fragment of a composite `VarDebugInfo`. + pub fragment: Option<Range<Size>>, + /// `.place.projection` from `mir::VarDebugInfo`. 
pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>, } @@ -145,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Some(per_local) => &per_local[local], None => return, }; - let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied(); + let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).cloned(); let has_proj = || vars.iter().any(|var| !var.projection.is_empty()); let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg { @@ -187,6 +193,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { name, source_info: decl.source_info, dbg_var, + fragment: None, projection: ty::List::empty(), }) } @@ -199,7 +206,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let name = if bx.sess().fewer_names() { None } else { - Some(match whole_local_var.or(fallback_var) { + Some(match whole_local_var.or(fallback_var.clone()) { Some(var) if var.name != kw::Empty => var.name.to_string(), _ => format!("{:?}", local), }) @@ -249,7 +256,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::UnsizedPlace(_) => return, }; - let vars = vars.iter().copied().chain(fallback_var); + let vars = vars.iter().cloned().chain(fallback_var); for var in vars { let Some(dbg_var) = var.dbg_var else { continue }; @@ -312,9 +319,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.store(place.llval, alloca.llval, alloca.align); // Point the debug info to `*alloca` for the current variable - bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]); + bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO], None); } else { - bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets); + bx.dbg_var_addr( + dbg_var, + dbg_loc, + base.llval, + direct_offset, + &indirect_offsets, + None, + ); } } } @@ -382,6 +396,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ty = self.monomorphize(c.ty()); (ty, VariableKind::LocalVariable) } + mir::VarDebugInfoContents::Composite { ty, fragments: _ } => { + let ty = self.monomorphize(ty); + (ty, VariableKind::LocalVariable) + } }; self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span) @@ -393,6 +411,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { name: var.name, source_info: var.source_info, dbg_var, + fragment: None, projection: place.projection, }); } @@ -407,10 +426,48 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx, ); - bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]); + bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], None); } } } + mir::VarDebugInfoContents::Composite { ty, ref fragments } => { + let var_ty = self.monomorphize(ty); + let var_layout = self.cx.layout_of(var_ty); + for fragment in fragments { + let mut fragment_start = Size::ZERO; + let mut fragment_layout = var_layout; + + for elem in &fragment.projection { + match *elem { + mir::ProjectionElem::Field(field, _) => { + let i = field.index(); + fragment_start += fragment_layout.fields.offset(i); + fragment_layout = fragment_layout.field(self.cx, i); + } + _ => span_bug!( + var.source_info.span, + "unsupported fragment projection `{:?}`", + elem, + ), + } + } + + let place = fragment.contents; + per_local[place.local].push(PerLocalVarDebugInfo { + name: var.name, + source_info: var.source_info, + dbg_var, + fragment: if fragment_layout.size == 
var_layout.size { + // Fragment covers entire variable, so as far as + // DWARF is concerned, it's not really a fragment. + None + } else { + Some(fragment_start..fragment_start + fragment_layout.size) + }, + projection: place.projection, + }); + } + } } } Some(per_local) diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index da9aaf00e..79c66a955 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -16,6 +16,18 @@ use rustc_middle::mir::traversal; use self::operand::{OperandRef, OperandValue}; +// Used for tracking the state of generated basic blocks. +enum CachedLlbb<T> { + /// Nothing created yet. + None, + + /// Has been created. + Some(T), + + /// Nothing created yet, and nothing should be. + Skip, +} + /// Master context for codegenning from MIR. pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { instance: Instance<'tcx>, @@ -43,7 +55,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { /// as-needed (e.g. RPO reaching it or another block branching to it). // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`). - cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>, + cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>>, /// The funclet status of each basic block cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>, @@ -155,11 +167,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } let cleanup_kinds = analyze::cleanup_kinds(&mir); - let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir - .basic_blocks - .indices() - .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None }) - .collect(); + let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> = + mir.basic_blocks + .indices() + .map(|bb| { + if bb == mir::START_BLOCK { CachedLlbb::Some(start_llbb) } else { CachedLlbb::None } + }) + .collect(); let mut fx = FunctionCx { instance, @@ -189,7 +203,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( all_consts_ok = false; match err { // errored or at least linted - ErrorHandled::Reported(_) | ErrorHandled::Linted => {} + ErrorHandled::Reported(_) => {} ErrorHandled::TooGeneric => { span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err) } diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index e6ba642a7..34a5b638d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -40,10 +40,10 @@ pub enum OperandValue<V> { /// instead. #[derive(Copy, Clone)] pub struct OperandRef<'tcx, V> { - // The value. + /// The value. pub val: OperandValue<V>, - // The layout of value, based on its Rust type. + /// The layout of value, based on its Rust type. 
pub layout: TyAndLayout<'tcx>, } diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 9c18df564..fbe30154a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -29,7 +29,7 @@ pub struct PlaceRef<'tcx, V> { impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> { - assert!(!layout.is_unsized()); + assert!(layout.is_sized()); PlaceRef { llval, llextra: None, layout, align: layout.align.abi } } @@ -38,7 +38,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { layout: TyAndLayout<'tcx>, align: Align, ) -> PlaceRef<'tcx, V> { - assert!(!layout.is_unsized()); + assert!(layout.is_sized()); PlaceRef { llval, llextra: None, layout, align } } @@ -48,7 +48,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, layout: TyAndLayout<'tcx>, ) -> Self { - assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); + assert!(layout.is_sized(), "tried to statically allocate unsized place"); let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi); Self::new_sized(tmp, layout) } @@ -145,7 +145,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { ); return simple(); } - _ if !field.is_unsized() => return simple(), + _ if field.is_sized() => return simple(), ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(), ty::Adt(def, _) => { if def.repr().packed() { @@ -209,7 +209,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, cast_to: Ty<'tcx>, ) -> V { - let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to)); + let cast_to_layout = bx.cx().layout_of(cast_to); + let cast_to_size = cast_to_layout.layout.size(); + let cast_to = bx.cx().immediate_backend_type(cast_to_layout); if self.layout.abi.is_uninhabited() { return bx.cx().const_undef(cast_to); } @@ -229,7 +231,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { // Read the tag/niche-encoded discriminant from memory. let tag = self.project_field(bx, tag_field); - let tag = bx.load_operand(tag); + let tag_op = bx.load_operand(tag); + let tag_imm = tag_op.immediate(); // Decode the discriminant (specifically if it's niche-encoded). match *tag_encoding { @@ -242,68 +245,170 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { Int(_, signed) => !tag_scalar.is_bool() && signed, _ => false, }; - bx.intcast(tag.immediate(), cast_to, signed) + bx.intcast(tag_imm, cast_to, signed) } TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => { - // Rebase from niche values to discriminants, and check - // whether the result is in range for the niche variants. - let niche_llty = bx.cx().immediate_backend_type(tag.layout); - let tag = tag.immediate(); - - // We first compute the "relative discriminant" (wrt `niche_variants`), - // that is, if `n = niche_variants.end() - niche_variants.start()`, - // we remap `niche_start..=niche_start + n` (which may wrap around) - // to (non-wrap-around) `0..=n`, to be able to check whether the - // discriminant corresponds to a niche variant with one comparison. - // We also can't go directly to the (variant index) discriminant - // and check that it is in the range `niche_variants`, because - // that might not fit in the same type, on top of needing an extra - // comparison (see also the comment on `let niche_discr`). - let relative_discr = if niche_start == 0 { - // Avoid subtracting `0`, which wouldn't work for pointers. 
- // FIXME(eddyb) check the actual primitive type here. - tag + // Cast to an integer so we don't have to treat a pointer as a + // special case. + let (tag, tag_llty) = if tag_scalar.primitive().is_ptr() { + let t = bx.type_isize(); + let tag = bx.ptrtoint(tag_imm, t); + (tag, t) } else { - bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start)) + (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)) }; + + let tag_size = tag_scalar.size(bx.cx()); + let max_unsigned = tag_size.unsigned_int_max(); + let max_signed = tag_size.signed_int_max() as u128; + let min_signed = max_signed + 1; let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32(); - let is_niche = if relative_max == 0 { - // Avoid calling `const_uint`, which wouldn't work for pointers. - // Also use canonical == 0 instead of non-canonical u<= 0. - // FIXME(eddyb) check the actual primitive type here. - bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty)) + let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned; + let range = tag_scalar.valid_range(bx.cx()); + + let sle = |lhs: u128, rhs: u128| -> bool { + // Signed and unsigned comparisons give the same results, + // except that in signed comparisons an integer with the + // sign bit set is less than one with the sign bit clear. + // Toggle the sign bit to do a signed comparison. + (lhs ^ min_signed) <= (rhs ^ min_signed) + }; + + // We have a subrange `niche_start..=niche_end` inside `range`. + // If the value of the tag is inside this subrange, it's a + // "niche value", an increment of the discriminant. Otherwise it + // indicates the untagged variant. + // A general algorithm to extract the discriminant from the tag + // is: + // relative_tag = tag - niche_start + // is_niche = relative_tag <= (ule) relative_max + // discr = if is_niche { + // cast(relative_tag) + niche_variants.start() + // } else { + // untagged_variant + // } + // However, we will likely be able to emit simpler code. + + // Find the least and greatest values in `range`, considered + // both as signed and unsigned. + let (low_unsigned, high_unsigned) = if range.start <= range.end { + (range.start, range.end) } else { - let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64); - bx.icmp(IntPredicate::IntULE, relative_discr, relative_max) + (0, max_unsigned) + }; + let (low_signed, high_signed) = if sle(range.start, range.end) { + (range.start, range.end) + } else { + (min_signed, max_signed) }; - // NOTE(eddyb) this addition needs to be performed on the final - // type, in case the niche itself can't represent all variant - // indices (e.g. `u8` niche with more than `256` variants, - // but enough uninhabited variants so that the remaining variants - // fit in the niche). - // In other words, `niche_variants.end - niche_variants.start` - // is representable in the niche, but `niche_variants.end` - // might not be, in extreme cases. - let niche_discr = { - let relative_discr = if relative_max == 0 { - // HACK(eddyb) since we have only one niche, we know which - // one it is, and we can avoid having a dynamic value here. 
- bx.cx().const_uint(cast_to, 0) + let niches_ule = niche_start <= niche_end; + let niches_sle = sle(niche_start, niche_end); + let cast_smaller = cast_to_size <= tag_size; + + // In the algorithm above, we can change + // cast(relative_tag) + niche_variants.start() + // into + // cast(tag + (niche_variants.start() - niche_start)) + // if either the casted type is no larger than the original + // type, or if the niche values are contiguous (in either the + // signed or unsigned sense). + let can_incr = cast_smaller || niches_ule || niches_sle; + + let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> { + if !can_incr { + None + } else if niche_start == low_unsigned { + Some((IntPredicate::IntULE, niche_end)) + } else if niche_end == high_unsigned { + Some((IntPredicate::IntUGE, niche_start)) + } else if niche_start == low_signed { + Some((IntPredicate::IntSLE, niche_end)) + } else if niche_end == high_signed { + Some((IntPredicate::IntSGE, niche_start)) } else { - bx.intcast(relative_discr, cast_to, false) + None + } + }; + + let (is_niche, tagged_discr, delta) = if relative_max == 0 { + // Best case scenario: only one tagged variant. This will + // likely become just a comparison and a jump. + // The algorithm is: + // is_niche = tag == niche_start + // discr = if is_niche { + // niche_start + // } else { + // untagged_variant + // } + let niche_start = bx.cx().const_uint_big(tag_llty, niche_start); + let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start); + let tagged_discr = + bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64); + (is_niche, tagged_discr, 0) + } else if let Some((predicate, constant)) = data_for_boundary_niche() { + // The niche values are either the lowest or the highest in + // `range`. We can avoid the first subtraction in the + // algorithm. + // The algorithm is now this: + // is_niche = tag <= niche_end + // discr = if is_niche { + // cast(tag + (niche_variants.start() - niche_start)) + // } else { + // untagged_variant + // } + // (the first line may instead be tag >= niche_start, + // and may be a signed or unsigned comparison) + // The arithmetic must be done before the cast, so we can + // have the correct wrapping behavior. See issue #104519 for + // the consequences of getting this wrong. + let is_niche = + bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant)); + let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start); + let incr_tag = if delta == 0 { + tag + } else { + bx.add(tag, bx.cx().const_uint_big(tag_llty, delta)) }; - bx.add( + + let cast_tag = if cast_smaller { + bx.intcast(incr_tag, cast_to, false) + } else if niches_ule { + bx.zext(incr_tag, cast_to) + } else { + bx.sext(incr_tag, cast_to) + }; + + (is_niche, cast_tag, 0) + } else { + // The special cases don't apply, so we'll have to go with + // the general algorithm. 
+ let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start)); + let cast_tag = bx.intcast(relative_discr, cast_to, false); + let is_niche = bx.icmp( + IntPredicate::IntULE, relative_discr, - bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64), - ) + bx.cx().const_uint(tag_llty, relative_max as u64), + ); + (is_niche, cast_tag, niche_variants.start().as_u32() as u128) + }; + + let tagged_discr = if delta == 0 { + tagged_discr + } else { + bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta)) }; - bx.select( + let discr = bx.select( is_niche, - niche_discr, + tagged_discr, bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64), - ) + ); + + // In principle we could insert assumes on the possible range of `discr`, but + // currently in LLVM this seems to be a pessimization. + + discr } } } diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 4aab31fbf..9ad96f7a4 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -18,17 +18,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { #[instrument(level = "trace", skip(self, bx))] pub fn codegen_rvalue( &mut self, - mut bx: Bx, + bx: &mut Bx, dest: PlaceRef<'tcx, Bx::Value>, rvalue: &mir::Rvalue<'tcx>, - ) -> Bx { + ) { match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&mut bx, operand); + let cg_operand = self.codegen_operand(bx, operand); // FIXME: consider not copying constants through stack. (Fixable by codegen'ing // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?) - cg_operand.val.store(&mut bx, dest); - bx + cg_operand.val.store(bx, dest); } mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => { @@ -37,16 +36,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if bx.cx().is_backend_scalar_pair(dest.layout) { // Into-coerce of a thin pointer to a fat pointer -- just // use the operand path. - let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&mut bx, dest); - return bx; + let temp = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(bx, dest); + return; } // Unsize of a nontrivial struct. I would prefer for // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.codegen_operand(&mut bx, source); + let operand = self.codegen_operand(bx, source); match operand.val { OperandValue::Pair(..) | OperandValue::Immediate(_) => { // Unsize from an immediate structure. We don't @@ -56,63 +55,62 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // index into the struct, and this case isn't // important enough for it. 
debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&mut bx, operand.layout); - scratch.storage_live(&mut bx); - operand.val.store(&mut bx, scratch); - base::coerce_unsized_into(&mut bx, scratch, dest); - scratch.storage_dead(&mut bx); + let scratch = PlaceRef::alloca(bx, operand.layout); + scratch.storage_live(bx); + operand.val.store(bx, scratch); + base::coerce_unsized_into(bx, scratch, dest); + scratch.storage_dead(bx); } OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized_aligned(llref, operand.layout, align); - base::coerce_unsized_into(&mut bx, source, dest); + base::coerce_unsized_into(bx, source, dest); } OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue"); } } - bx } mir::Rvalue::Repeat(ref elem, count) => { - let cg_elem = self.codegen_operand(&mut bx, elem); + let cg_elem = self.codegen_operand(bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. if dest.layout.is_zst() { - return bx; + return; } if let OperandValue::Immediate(v) = cg_elem.val { let zero = bx.const_usize(0); - let start = dest.project_index(&mut bx, zero).llval; + let start = dest.project_index(bx, zero).llval; let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if bx.cx().const_to_opt_u128(v, false) == Some(0) { let fill = bx.cx().const_u8(0); bx.memset(start, fill, size, dest.align, MemFlags::empty()); - return bx; + return; } // Use llvm.memset.p0i8.* to initialize byte arrays let v = bx.from_immediate(v); if bx.cx().val_ty(v) == bx.cx().type_i8() { bx.memset(start, v, size, dest.align, MemFlags::empty()); - return bx; + return; } } let count = self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all()); - bx.write_operand_repeatedly(cg_elem, count, dest) + bx.write_operand_repeatedly(cg_elem, count, dest); } mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => { - dest.codegen_set_discr(&mut bx, variant_index); + dest.codegen_set_discr(bx, variant_index); if bx.tcx().adt_def(adt_did).is_enum() { - (dest.project_downcast(&mut bx, variant_index), active_field_index) + (dest.project_downcast(bx, variant_index), active_field_index) } else { (dest, active_field_index) } @@ -120,37 +118,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { _ => (dest, None), }; for (i, operand) in operands.iter().enumerate() { - let op = self.codegen_operand(&mut bx, operand); + let op = self.codegen_operand(bx, operand); // Do not generate stores and GEPis for zero-sized fields. 
if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); let field = if let mir::AggregateKind::Array(_) = **kind { let llindex = bx.cx().const_usize(field_index as u64); - dest.project_index(&mut bx, llindex) + dest.project_index(bx, llindex) } else { - dest.project_field(&mut bx, field_index) + dest.project_field(bx, field_index) }; - op.val.store(&mut bx, field); + op.val.store(bx, field); } } - bx } _ => { assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP)); - let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&mut bx, dest); - bx + let temp = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(bx, dest); } } } pub fn codegen_rvalue_unsized( &mut self, - mut bx: Bx, + bx: &mut Bx, indirect_dest: PlaceRef<'tcx, Bx::Value>, rvalue: &mir::Rvalue<'tcx>, - ) -> Bx { + ) { debug!( "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue @@ -158,9 +154,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&mut bx, operand); - cg_operand.val.store_unsized(&mut bx, indirect_dest); - bx + let cg_operand = self.codegen_operand(bx, operand); + cg_operand.val.store_unsized(bx, indirect_dest); } _ => bug!("unsized assignment other than `Rvalue::Use`"), @@ -169,9 +164,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn codegen_rvalue_operand( &mut self, - mut bx: Bx, + bx: &mut Bx, rvalue: &mir::Rvalue<'tcx>, - ) -> (Bx, OperandRef<'tcx, Bx::Value>) { + ) -> OperandRef<'tcx, Bx::Value> { assert!( self.rvalue_creates_operand(rvalue, DUMMY_SP), "cannot codegen {:?} to operand", @@ -180,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.codegen_operand(&mut bx, source); + let operand = self.codegen_operand(bx, source); debug!("cast operand is {:?}", operand); let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty)); @@ -245,7 +240,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } }; let (lldata, llextra) = - base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra); + base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra); OperandValue::Pair(lldata, llextra) } mir::CastKind::Pointer(PointerCast::MutToConstPointer) @@ -278,7 +273,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValue::Pair(v, l) => (v, Some(l)), }; let (lldata, llextra) = - base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra); + base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra); OperandValue::Pair(lldata, llextra) } mir::CastKind::Pointer( @@ -299,7 +294,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ll_t_out = bx.cx().immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); - return (bx, OperandRef { val, layout: cast }); + return OperandRef { val, layout: cast }; } let r_t_in = CastTy::from_ty(operand.layout.ty).expect("bad input type for cast"); @@ -348,7 +343,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValue::Immediate(newval) } }; - (bx, OperandRef { val, layout: cast }) + OperandRef { val, layout: cast } } mir::Rvalue::Ref(_, bk, place) => { @@ -361,10 +356,7 @@ impl<'a, 'tcx, Bx: 
BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.codegen_place_to_pointer(bx, place, mk_ref) } - mir::Rvalue::CopyForDeref(place) => { - let operand = self.codegen_operand(&mut bx, &Operand::Copy(place)); - (bx, operand) - } + mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)), mir::Rvalue::AddressOf(mutability, place) => { let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| { tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability }) @@ -373,23 +365,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::Len(place) => { - let size = self.evaluate_array_len(&mut bx, place); - let operand = OperandRef { + let size = self.evaluate_array_len(bx, place); + OperandRef { val: OperandValue::Immediate(size), layout: bx.cx().layout_of(bx.tcx().types.usize), - }; - (bx, operand) + } } mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => { - let lhs = self.codegen_operand(&mut bx, lhs); - let rhs = self.codegen_operand(&mut bx, rhs); + let lhs = self.codegen_operand(bx, lhs); + let rhs = self.codegen_operand(bx, rhs); let llresult = match (lhs.val, rhs.val) { ( OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra), ) => self.codegen_fat_ptr_binop( - &mut bx, + bx, op, lhs_addr, lhs_extra, @@ -399,22 +390,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ), (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!(), }; - let operand = OperandRef { + OperandRef { val: OperandValue::Immediate(llresult), layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), - }; - (bx, operand) + } } mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => { - let lhs = self.codegen_operand(&mut bx, lhs); - let rhs = self.codegen_operand(&mut bx, rhs); + let lhs = self.codegen_operand(bx, lhs); + let rhs = self.codegen_operand(bx, rhs); let result = self.codegen_scalar_checked_binop( - &mut bx, + bx, op, lhs.immediate(), rhs.immediate(), @@ -422,13 +412,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); - let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }; - - (bx, operand) + OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) } } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.codegen_operand(&mut bx, operand); + let operand = self.codegen_operand(bx, operand); let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_floating_point(); let llval = match op { @@ -441,22 +429,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } }; - (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }) + OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout } } mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(self.mir, bx.tcx()); let discr_ty = self.monomorphize(discr_ty); - let discr = self - .codegen_place(&mut bx, place.as_ref()) - .codegen_get_discr(&mut bx, discr_ty); - ( - bx, - OperandRef { - val: OperandValue::Immediate(discr), - layout: self.cx.layout_of(discr_ty), - }, - ) + let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty); + OperandRef { + val: 
OperandValue::Immediate(discr), + layout: self.cx.layout_of(discr_ty), + } } mir::Rvalue::NullaryOp(null_op, ty) => { @@ -469,36 +452,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let val = bx.cx().const_usize(val); let tcx = self.cx.tcx(); - ( - bx, - OperandRef { - val: OperandValue::Immediate(val), - layout: self.cx.layout_of(tcx.types.usize), - }, - ) + OperandRef { + val: OperandValue::Immediate(val), + layout: self.cx.layout_of(tcx.types.usize), + } } mir::Rvalue::ThreadLocalRef(def_id) => { assert!(bx.cx().tcx().is_static(def_id)); let static_ = bx.get_static(def_id); let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id)); - let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout); - (bx, operand) - } - mir::Rvalue::Use(ref operand) => { - let operand = self.codegen_operand(&mut bx, operand); - (bx, operand) + OperandRef::from_immediate_or_packed_pair(bx, static_, layout) } + mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand), mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.cx.tcx()); - let operand = - OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty))); - (bx, operand) + OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty))) } mir::Rvalue::ShallowInitBox(ref operand, content_ty) => { - let operand = self.codegen_operand(&mut bx, operand); + let operand = self.codegen_operand(bx, operand); let lloperand = operand.immediate(); let content_ty = self.monomorphize(content_ty); @@ -506,8 +480,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llty_ptr = bx.cx().backend_type(box_layout); let val = bx.pointercast(lloperand, llty_ptr); - let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout }; - (bx, operand) + OperandRef { val: OperandValue::Immediate(val), layout: box_layout } } } } @@ -531,11 +504,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref` fn codegen_place_to_pointer( &mut self, - mut bx: Bx, + bx: &mut Bx, place: mir::Place<'tcx>, mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>, - ) -> (Bx, OperandRef<'tcx, Bx::Value>) { - let cg_place = self.codegen_place(&mut bx, place.as_ref()); + ) -> OperandRef<'tcx, Bx::Value> { + let cg_place = self.codegen_place(bx, place.as_ref()); let ty = cg_place.layout.ty; @@ -546,7 +519,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) }; - (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }) + OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) } } pub fn codegen_scalar_binop( diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs index 1db0fb3a6..19452c8cd 100644 --- a/compiler/rustc_codegen_ssa/src/mir/statement.rs +++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs @@ -8,8 +8,8 @@ use crate::traits::*; impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { #[instrument(level = "debug", skip(self, bx))] - pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx { - self.set_debug_loc(&mut bx, statement.source_info); + pub fn codegen_statement(&mut self, bx: &mut Bx, statement: 
&mir::Statement<'tcx>) { + self.set_debug_loc(bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(box (ref place, ref rvalue)) => { if let Some(index) = place.as_local() { @@ -19,10 +19,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue) } LocalRef::Operand(None) => { - let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue); + let operand = self.codegen_rvalue_operand(bx, rvalue); self.locals[index] = LocalRef::Operand(Some(operand)); - self.debug_introduce_local(&mut bx, index); - bx + self.debug_introduce_local(bx, index); } LocalRef::Operand(Some(op)) => { if !op.layout.is_zst() { @@ -35,59 +34,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // If the type is zero-sized, it's already been set here, // but we still need to make sure we codegen the operand - self.codegen_rvalue_operand(bx, rvalue).0 + self.codegen_rvalue_operand(bx, rvalue); } } } else { - let cg_dest = self.codegen_place(&mut bx, place.as_ref()); - self.codegen_rvalue(bx, cg_dest, rvalue) + let cg_dest = self.codegen_place(bx, place.as_ref()); + self.codegen_rvalue(bx, cg_dest, rvalue); } } mir::StatementKind::SetDiscriminant { box ref place, variant_index } => { - self.codegen_place(&mut bx, place.as_ref()) - .codegen_set_discr(&mut bx, variant_index); - bx + self.codegen_place(bx, place.as_ref()).codegen_set_discr(bx, variant_index); } mir::StatementKind::Deinit(..) => { // For now, don't codegen this to anything. In the future it may be worth // experimenting with what kind of information we can emit to LLVM without hurting // perf here - bx } mir::StatementKind::StorageLive(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_live(&mut bx); + cg_place.storage_live(bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_live(&mut bx); + cg_indirect_place.storage_live(bx); } - bx } mir::StatementKind::StorageDead(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_dead(&mut bx); + cg_place.storage_dead(bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_dead(&mut bx); + cg_indirect_place.storage_dead(bx); } - bx } mir::StatementKind::Coverage(box ref coverage) => { - self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope); - bx + self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope); } mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => { - let op_val = self.codegen_operand(&mut bx, op); + let op_val = self.codegen_operand(bx, op); bx.assume(op_val.immediate()); - bx } mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping( mir::CopyNonOverlapping { ref count, ref src, ref dst }, )) => { - let dst_val = self.codegen_operand(&mut bx, dst); - let src_val = self.codegen_operand(&mut bx, src); - let count = self.codegen_operand(&mut bx, count).immediate(); + let dst_val = self.codegen_operand(bx, dst); + let src_val = self.codegen_operand(bx, src); + let count = self.codegen_operand(bx, count).immediate(); let pointee_layout = dst_val .layout - .pointee_info_at(&bx, rustc_target::abi::Size::ZERO) + .pointee_info_at(bx, rustc_target::abi::Size::ZERO) .expect("Expected pointer"); let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes())); @@ -95,12 +87,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> 
FunctionCx<'a, 'tcx, Bx> { let dst = dst_val.immediate(); let src = src_val.immediate(); bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty()); - bx } mir::StatementKind::FakeRead(..) | mir::StatementKind::Retag { .. } | mir::StatementKind::AscribeUserType(..) - | mir::StatementKind::Nop => bx, + | mir::StatementKind::Nop => {} } } } diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs index 5006a2157..27da33581 100644 --- a/compiler/rustc_codegen_ssa/src/mono_item.rs +++ b/compiler/rustc_codegen_ssa/src/mono_item.rs @@ -40,12 +40,12 @@ impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> { .iter() .map(|(op, op_sp)| match *op { hir::InlineAsmOperand::Const { ref anon_const } => { - let anon_const_def_id = - cx.tcx().hir().local_def_id(anon_const.hir_id).to_def_id(); - let const_value = - cx.tcx().const_eval_poly(anon_const_def_id).unwrap_or_else( - |_| span_bug!(*op_sp, "asm const cannot be resolved"), - ); + let const_value = cx + .tcx() + .const_eval_poly(anon_const.def_id.to_def_id()) + .unwrap_or_else(|_| { + span_bug!(*op_sp, "asm const cannot be resolved") + }); let ty = cx .tcx() .typeck_body(anon_const.body) diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs index 83407ee8f..301683e8e 100644 --- a/compiler/rustc_codegen_ssa/src/target_features.rs +++ b/compiler/rustc_codegen_ssa/src/target_features.rs @@ -179,6 +179,7 @@ const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ ("f16c", Some(sym::f16c_target_feature)), ("fma", None), ("fxsr", None), + ("gfni", Some(sym::avx512_target_feature)), ("lzcnt", None), ("movbe", Some(sym::movbe_target_feature)), ("pclmulqdq", None), @@ -195,6 +196,8 @@ const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ ("sse4a", Some(sym::sse4a_target_feature)), ("ssse3", None), ("tbm", Some(sym::tbm_target_feature)), + ("vaes", Some(sym::avx512_target_feature)), + ("vpclmulqdq", Some(sym::avx512_target_feature)), ("xsave", None), ("xsavec", None), ("xsaveopt", None), @@ -212,6 +215,7 @@ const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ // tidy-alphabetical-start ("altivec", Some(sym::powerpc_target_feature)), + ("power10-vector", Some(sym::powerpc_target_feature)), ("power8-altivec", Some(sym::powerpc_target_feature)), ("power8-vector", Some(sym::powerpc_target_feature)), ("power9-altivec", Some(sym::powerpc_target_feature)), @@ -267,6 +271,7 @@ const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ // tidy-alphabetical-start ("atomics", Some(sym::wasm_target_feature)), ("bulk-memory", Some(sym::wasm_target_feature)), + ("multivalue", Some(sym::wasm_target_feature)), ("mutable-globals", Some(sym::wasm_target_feature)), ("nontrapping-fptoint", Some(sym::wasm_target_feature)), ("reference-types", Some(sym::wasm_target_feature)), diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs index 87e347c61..5c35070ea 100644 --- a/compiler/rustc_codegen_ssa/src/traits/backend.rs +++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs @@ -119,7 +119,7 @@ pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Se tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, - has_alloc_error_handler: bool, + alloc_error_handler_kind: AllocatorKind, ) -> Self::Module; /// This generates the codegen unit and returns it along with /// a `u64` giving an estimate of the unit's 
processing cost. diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 01408f39f..bc679a5dc 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -151,11 +151,11 @@ pub trait BuilderMethods<'a, 'tcx>: /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset. fn write_operand_repeatedly( - self, + &mut self, elem: OperandRef<'tcx, Self::Value>, count: u64, dest: PlaceRef<'tcx, Self::Value>, - ) -> Self; + ); fn range_metadata(&mut self, load: Self::Value, range: WrappingRange); fn nonnull_metadata(&mut self, load: Self::Value); diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs index f310789d1..63fecaf34 100644 --- a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs @@ -6,6 +6,8 @@ use rustc_span::{SourceFile, Span, Symbol}; use rustc_target::abi::call::FnAbi; use rustc_target::abi::Size; +use std::ops::Range; + pub trait DebugInfoMethods<'tcx>: BackendTypes { fn create_vtable_debuginfo( &self, @@ -72,6 +74,9 @@ pub trait DebugInfoBuilderMethods: BackendTypes { direct_offset: Size, // NB: each offset implies a deref (i.e. they're steps in a pointer chain). indirect_offsets: &[Size], + // Byte range in the `dbg_var` covered by this fragment, + // if this is a fragment of a composite `DIVariable`. + fragment: Option<Range<Size>>, ); fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation); fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index bdc6a91cf..86481d5d7 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -22,6 +22,7 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; + fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; |