path: root/compiler/rustc_codegen_ssa/src/back
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
commit     218caa410aa38c29984be31a5229b9fa717560ee (patch)
tree       c54bd55eeb6e4c508940a30e94c0032fbd45d677 /compiler/rustc_codegen_ssa/src/back
parent     Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/back')
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/archive.rs        |   2
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs           | 206
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs         |   4
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/metadata.rs       |  30
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/symbol_export.rs  |  20
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs          |  36
6 files changed, 177 insertions, 121 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
index 58558fb8c..d3cd085cf 100644
--- a/compiler/rustc_codegen_ssa/src/back/archive.rs
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -124,7 +124,7 @@ fn try_filter_fat_archs(
) -> io::Result<Option<PathBuf>> {
let archs = archs.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
- let desired = match archs.iter().filter(|a| a.architecture() == target_arch).next() {
+ let desired = match archs.iter().find(|a| a.architecture() == target_arch) {
Some(a) => a,
None => return Ok(None),
};
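
The archive.rs change above is the idiomatic Clippy rewrite: `.filter(pred).next()` becomes `.find(pred)`, which short-circuits on the first match in exactly the same way. A minimal standalone sketch (not part of the patch) showing the two forms agree:

    fn main() {
        let archs = ["x86_64", "aarch64", "arm"];
        let target_arch = "aarch64";

        // Old form: filter, then take the first element.
        let via_filter = archs.iter().filter(|a| **a == target_arch).next();
        // New form: find stops at the first matching element.
        let via_find = archs.iter().find(|a| **a == target_arch);

        assert_eq!(via_filter, via_find);
        println!("desired arch: {:?}", via_find);
    }
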
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index fe2e4b36c..34e042376 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -11,7 +11,7 @@ use rustc_metadata::find_native_static_library;
use rustc_metadata::fs::{emit_wrapper_file, METADATA_FILENAME};
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Lto, Strip};
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Strip};
use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
use rustc_session::cstore::DllImport;
use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
@@ -208,16 +208,16 @@ pub fn link_binary<'a>(
Ok(())
}
+// Crate type is not passed when calculating the dylibs to include for LTO. In that case all
+// crate types must use the same dependency formats.
pub fn each_linked_rlib(
- sess: &Session,
info: &CrateInfo,
+ crate_type: Option<CrateType>,
f: &mut dyn FnMut(CrateNum, &Path),
) -> Result<(), errors::LinkRlibError> {
let crates = info.used_crates.iter();
- let mut fmts = None;
- let lto_active = matches!(sess.lto(), Lto::Fat | Lto::Thin);
- if lto_active {
+ let fmts = if crate_type.is_none() {
for combination in info.dependency_formats.iter().combinations(2) {
let (ty1, list1) = &combination[0];
let (ty2, list2) = &combination[1];
@@ -230,27 +230,23 @@ pub fn each_linked_rlib(
});
}
}
- }
-
- for (ty, list) in info.dependency_formats.iter() {
- match ty {
- CrateType::Executable
- | CrateType::Staticlib
- | CrateType::Cdylib
- | CrateType::ProcMacro => {
- fmts = Some(list);
- break;
- }
- CrateType::Dylib if lto_active => {
- fmts = Some(list);
- break;
- }
- _ => {}
+ if info.dependency_formats.is_empty() {
+ return Err(errors::LinkRlibError::MissingFormat);
}
- }
- let Some(fmts) = fmts else {
- return Err(errors::LinkRlibError::MissingFormat);
+ &info.dependency_formats[0].1
+ } else {
+ let fmts = info
+ .dependency_formats
+ .iter()
+ .find_map(|&(ty, ref list)| if Some(ty) == crate_type { Some(list) } else { None });
+
+ let Some(fmts) = fmts else {
+ return Err(errors::LinkRlibError::MissingFormat);
+ };
+
+ fmts
};
+
for &cnum in crates {
match fmts.get(cnum.as_usize() - 1) {
Some(&Linkage::NotLinked | &Linkage::Dynamic | &Linkage::IncludedFromDylib) => continue,
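
The rewritten `each_linked_rlib` selects the dependency-format list by crate type: with no crate type given (the LTO path) it first checks that all recorded crate types agree and then takes the first list, otherwise it looks up the list recorded for the requested crate type. Below is an illustrative sketch of that selection logic with simplified stand-in types; the real `CrateInfo`, `Linkage`, and error types are richer, and rustc checks agreement with itertools' `combinations(2)` rather than `windows(2)`:

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum CrateType { Executable, Staticlib, Dylib }

    #[allow(dead_code)]
    #[derive(Clone, PartialEq, Debug)]
    enum Linkage { NotLinked, Static, Dynamic }

    fn select_formats(
        dependency_formats: &[(CrateType, Vec<Linkage>)],
        crate_type: Option<CrateType>,
    ) -> Result<&Vec<Linkage>, String> {
        if crate_type.is_none() {
            // LTO path: every recorded crate type must use the same formats.
            for pair in dependency_formats.windows(2) {
                if pair[0].1 != pair[1].1 {
                    return Err("incompatible dependency formats between crate types".to_string());
                }
            }
            dependency_formats
                .first()
                .map(|(_, list)| list)
                .ok_or_else(|| "no dependency formats recorded".to_string())
        } else {
            // Link path: use the list recorded for the requested crate type.
            dependency_formats
                .iter()
                .find_map(|(ty, list)| (Some(*ty) == crate_type).then_some(list))
                .ok_or_else(|| "no dependency formats for the requested crate type".to_string())
        }
    }

    fn main() {
        let formats = vec![
            (CrateType::Executable, vec![Linkage::Static, Linkage::NotLinked]),
            (CrateType::Staticlib, vec![Linkage::Static, Linkage::NotLinked]),
        ];
        // LTO path: no crate type given, all recorded lists must agree.
        assert!(select_formats(&formats, None).is_ok());
        // Linking one specific crate type: use its recorded list.
        assert!(select_formats(&formats, Some(CrateType::Staticlib)).is_ok());
        // No list recorded for this crate type.
        assert!(select_formats(&formats, Some(CrateType::Dylib)).is_err());
    }
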
@@ -449,7 +445,7 @@ fn link_rlib<'a>(
/// Extract all symbols defined in raw-dylib libraries, collated by library name.
///
/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library,
-/// then the CodegenResults value contains one NativeLib instance for each block. However, the
+/// then the CodegenResults value contains one NativeLib instance for each block. However, the
/// linker appears to expect only a single import library for each library used, so we need to
/// collate the symbols together by library name before generating the import libraries.
fn collate_raw_dylibs<'a, 'b>(
@@ -516,64 +512,71 @@ fn link_staticlib<'a>(
)?;
let mut all_native_libs = vec![];
- let res = each_linked_rlib(sess, &codegen_results.crate_info, &mut |cnum, path| {
- let name = codegen_results.crate_info.crate_name[&cnum];
- let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
-
- // Here when we include the rlib into our staticlib we need to make a
- // decision whether to include the extra object files along the way.
- // These extra object files come from statically included native
- // libraries, but they may be cfg'd away with #[link(cfg(..))].
- //
- // This unstable feature, though, only needs liblibc to work. The only
- // use case there is where musl is statically included in liblibc.rlib,
- // so if we don't want the included version we just need to skip it. As
- // a result the logic here is that if *any* linked library is cfg'd away
- // we just skip all object files.
- //
- // Clearly this is not sufficient for a general purpose feature, and
- // we'd want to read from the library's metadata to determine which
- // object files come from where and selectively skip them.
- let skip_object_files = native_libs.iter().any(|lib| {
- matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
- && !relevant_lib(sess, lib)
- });
+ let res = each_linked_rlib(
+ &codegen_results.crate_info,
+ Some(CrateType::Staticlib),
+ &mut |cnum, path| {
+ let name = codegen_results.crate_info.crate_name[&cnum];
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+
+ // Here when we include the rlib into our staticlib we need to make a
+ // decision whether to include the extra object files along the way.
+ // These extra object files come from statically included native
+ // libraries, but they may be cfg'd away with #[link(cfg(..))].
+ //
+ // This unstable feature, though, only needs liblibc to work. The only
+ // use case there is where musl is statically included in liblibc.rlib,
+ // so if we don't want the included version we just need to skip it. As
+ // a result the logic here is that if *any* linked library is cfg'd away
+ // we just skip all object files.
+ //
+ // Clearly this is not sufficient for a general purpose feature, and
+ // we'd want to read from the library's metadata to determine which
+ // object files come from where and selectively skip them.
+ let skip_object_files = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
- let lto = are_upstream_rust_objects_already_included(sess)
- && !ignored_for_lto(sess, &codegen_results.crate_info, cnum);
+ let lto = are_upstream_rust_objects_already_included(sess)
+ && !ignored_for_lto(sess, &codegen_results.crate_info, cnum);
- // Ignoring obj file starting with the crate name
- // as simple comparison is not enough - there
- // might be also an extra name suffix
- let obj_start = name.as_str().to_owned();
+ // Ignoring obj file starting with the crate name
+ // as simple comparison is not enough - there
+ // might be also an extra name suffix
+ let obj_start = name.as_str().to_owned();
- ab.add_archive(
- path,
- Box::new(move |fname: &str| {
- // Ignore metadata files, no matter the name.
- if fname == METADATA_FILENAME {
- return true;
- }
+ ab.add_archive(
+ path,
+ Box::new(move |fname: &str| {
+ // Ignore metadata files, no matter the name.
+ if fname == METADATA_FILENAME {
+ return true;
+ }
- // Don't include Rust objects if LTO is enabled
- if lto && looks_like_rust_object_file(fname) {
- return true;
- }
+ // Don't include Rust objects if LTO is enabled
+ if lto && looks_like_rust_object_file(fname) {
+ return true;
+ }
- // Otherwise if this is *not* a rust object and we're skipping
- // objects then skip this file
- if skip_object_files && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
- return true;
- }
+ // Otherwise if this is *not* a rust object and we're skipping
+ // objects then skip this file
+ if skip_object_files
+ && (!fname.starts_with(&obj_start) || !fname.ends_with(".o"))
+ {
+ return true;
+ }
- // ok, don't skip this
- false
- }),
- )
- .unwrap();
+ // ok, don't skip this
+ false
+ }),
+ )
+ .unwrap();
- all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
- });
+ all_native_libs
+ .extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
+ },
+ );
if let Err(e) = res {
sess.emit_fatal(e);
}
@@ -607,21 +610,21 @@ fn link_dwarf_object<'a>(
}
impl<Relocations> ThorinSession<Relocations> {
- fn alloc_mmap<'arena>(&'arena self, data: Mmap) -> &'arena Mmap {
+ fn alloc_mmap(&self, data: Mmap) -> &Mmap {
(*self.arena_mmap.alloc(data)).borrow()
}
}
impl<Relocations> thorin::Session<Relocations> for ThorinSession<Relocations> {
- fn alloc_data<'arena>(&'arena self, data: Vec<u8>) -> &'arena [u8] {
+ fn alloc_data(&self, data: Vec<u8>) -> &[u8] {
(*self.arena_data.alloc(data)).borrow()
}
- fn alloc_relocation<'arena>(&'arena self, data: Relocations) -> &'arena Relocations {
+ fn alloc_relocation(&self, data: Relocations) -> &Relocations {
(*self.arena_relocations.alloc(data)).borrow()
}
- fn read_input<'arena>(&'arena self, path: &Path) -> std::io::Result<&'arena [u8]> {
+ fn read_input(&self, path: &Path) -> std::io::Result<&[u8]> {
let file = File::open(&path)?;
let mmap = (unsafe { Mmap::map(file) })?;
Ok(self.alloc_mmap(mmap))
@@ -722,7 +725,7 @@ fn link_natively<'a>(
linker::disable_localization(&mut cmd);
- for &(ref k, ref v) in sess.target.link_env.as_ref() {
+ for (k, v) in sess.target.link_env.as_ref() {
cmd.env(k.as_ref(), v.as_ref());
}
for k in sess.target.link_env_remove.as_ref() {
@@ -1194,7 +1197,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
if cfg!(any(target_os = "solaris", target_os = "illumos")) {
// On historical Solaris systems, "cc" may have
// been Sun Studio, which is not flag-compatible
- // with "gcc". This history casts a long shadow,
+ // with "gcc". This history casts a long shadow,
// and many modern illumos distributions today
// ship GCC as "gcc" without also making it
// available as "cc".
@@ -1228,12 +1231,23 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
sess.emit_fatal(errors::LinkerFileStem);
});
+ // Remove any version postfix.
+ let stem = stem
+ .rsplit_once('-')
+ .and_then(|(lhs, rhs)| rhs.chars().all(char::is_numeric).then_some(lhs))
+ .unwrap_or(stem);
+
+ // GCC/Clang can have an optional target prefix.
let flavor = if stem == "emcc" {
LinkerFlavor::EmCc
} else if stem == "gcc"
|| stem.ends_with("-gcc")
+ || stem == "g++"
+ || stem.ends_with("-g++")
|| stem == "clang"
|| stem.ends_with("-clang")
+ || stem == "clang++"
+ || stem.ends_with("-clang++")
{
LinkerFlavor::from_cli(LinkerFlavorCli::Gcc, &sess.target)
} else if stem == "wasm-ld" || stem.ends_with("-wasm-ld") {
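
The new stem handling first strips a trailing numeric version component, so `clang-15` or `aarch64-linux-gnu-gcc-12` is still recognized, and then also accepts the `g++`/`clang++` spellings with an optional target prefix. A hedged sketch of that normalization and matching, using a hypothetical helper rather than the actual `linker_and_flavor` code:

    // Strip a version postfix, then match known C/C++ driver names with an
    // optional target prefix. Illustrative only.
    fn is_cc_like(stem: &str) -> bool {
        let stem = stem
            .rsplit_once('-')
            .and_then(|(lhs, rhs)| rhs.chars().all(char::is_numeric).then_some(lhs))
            .unwrap_or(stem);

        ["gcc", "g++", "clang", "clang++"]
            .iter()
            .any(|name| stem == *name || stem.ends_with(&format!("-{name}")))
    }

    fn main() {
        assert!(is_cc_like("gcc"));
        assert!(is_cc_like("clang-15"));
        assert!(is_cc_like("aarch64-linux-gnu-gcc-12"));
        assert!(is_cc_like("x86_64-w64-mingw32-g++"));
        assert!(!is_cc_like("wasm-ld"));
        assert!(!is_cc_like("rust-lld"));
    }
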
@@ -1354,7 +1368,8 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
if !lib_args.is_empty() {
sess.emit_note(errors::StaticLibraryNativeArtifacts);
// Prefix for greppability
- sess.emit_note(errors::NativeStaticLibs { arguments: lib_args.join(" ") });
+ // Note: This must not be translated as tools are allowed to depend on this exact string.
+ sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" ")));
}
}
@@ -2612,7 +2627,7 @@ fn add_static_crate<'a>(
sess.target.no_builtins || !codegen_results.crate_info.is_no_builtins.contains(&cnum);
let mut archive = archive_builder_builder.new_archive_builder(sess);
- if let Err(e) = archive.add_archive(
+ if let Err(error) = archive.add_archive(
cratepath,
Box::new(move |f| {
if f == METADATA_FILENAME {
@@ -2652,7 +2667,7 @@ fn add_static_crate<'a>(
false
}),
) {
- sess.fatal(&format!("failed to build archive from rlib: {}", e));
+ sess.emit_fatal(errors::RlibArchiveBuildFailure { error });
}
if archive.build(&dst) {
link_upstream(&dst);
@@ -2822,11 +2837,30 @@ fn add_gcc_ld_path(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
// Implement the "linker flavor" part of -Zgcc-ld
// by asking cc to use some kind of lld.
cmd.arg("-fuse-ld=lld");
+
if !flavor.is_gnu() {
// Tell clang to use a non-default LLD flavor.
// Gcc doesn't understand the target option, but we currently assume
// that gcc is not used for Apple and Wasm targets (#97402).
- cmd.arg(format!("--target={}", sess.target.llvm_target));
+ //
+ // Note that we don't want to do that by default on macOS: e.g. passing a
+ // 10.7 target to LLVM works, but not to recent versions of clang/macOS, as
+ // shown in issue #101653 and the discussion in PR #101792.
+ //
+ // It could be required in some cases of cross-compiling with
+ // `-Zgcc-ld=lld`, but this is generally unspecified, and we don't know
+ // which specific versions of clang, macOS SDK, host and target OS
+ // combinations impact us here.
+ //
+ // So we do a simple first-approximation until we know more of what the
+ // Apple targets require (and which would be handled prior to hitting this
+ // `-Zgcc-ld=lld` codepath anyway), but the expectation is that until then
+ // this should be manually passed if needed. We specify the target when
+ // targeting a different linker flavor on macOS, and that's also always
+ // the case when targeting WASM.
+ if sess.target.linker_flavor != sess.host.linker_flavor {
+ cmd.arg(format!("--target={}", sess.target.llvm_target));
+ }
}
}
}
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index f087d903e..eaf1e9817 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -108,7 +108,7 @@ pub fn get_linker<'a>(
if sess.target.is_like_msvc {
if let Some(ref tool) = msvc_tool {
cmd.args(tool.args());
- for &(ref k, ref v) in tool.env() {
+ for (k, v) in tool.env() {
if k == "PATH" {
new_path.extend(env::split_paths(v));
msvc_changed_path = true;
@@ -544,7 +544,7 @@ impl<'a> Linker for GccLinker<'a> {
// link times negatively.
//
// -dead_strip can't be part of the pre_link_args because it's also used
- // for partial linking when using multiple codegen units (-r). So we
+ // for partial linking when using multiple codegen units (-r). So we
// insert it here.
if self.sess.target.is_like_osx {
self.linker_arg("-dead_strip");
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 51c5c375d..7d3c14fec 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -100,7 +100,13 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
};
let architecture = match &sess.target.arch[..] {
"arm" => Architecture::Arm,
- "aarch64" => Architecture::Aarch64,
+ "aarch64" => {
+ if sess.target.pointer_width == 32 {
+ Architecture::Aarch64_Ilp32
+ } else {
+ Architecture::Aarch64
+ }
+ }
"x86" => Architecture::I386,
"s390x" => Architecture::S390x,
"mips" => Architecture::Mips,
@@ -165,11 +171,23 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
};
e_flags
}
- Architecture::Riscv64 if sess.target.options.features.contains("+d") => {
- // copied from `riscv64-linux-gnu-gcc foo.c -c`, note though
- // that the `+d` target feature represents whether the double
- // float abi is enabled.
- let e_flags = elf::EF_RISCV_RVC | elf::EF_RISCV_FLOAT_ABI_DOUBLE;
+ Architecture::Riscv32 | Architecture::Riscv64 => {
+ // Source: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/079772828bd10933d34121117a222b4cc0ee2200/riscv-elf.adoc
+ let mut e_flags: u32 = 0x0;
+ let features = &sess.target.options.features;
+ // Check if compressed is enabled
+ if features.contains("+c") {
+ e_flags |= elf::EF_RISCV_RVC;
+ }
+
+ // Select the appropriate floating-point ABI
+ if features.contains("+d") {
+ e_flags |= elf::EF_RISCV_FLOAT_ABI_DOUBLE;
+ } else if features.contains("+f") {
+ e_flags |= elf::EF_RISCV_FLOAT_ABI_SINGLE;
+ } else {
+ e_flags |= elf::EF_RISCV_FLOAT_ABI_SOFT;
+ }
e_flags
}
_ => 0,
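
The RISC-V branch now derives the ELF e_flags from the enabled target features instead of special-casing `+d` on riscv64 only: the RVC bit is set when compressed instructions are enabled, and exactly one float-ABI value (double, single, or soft) is chosen. A standalone sketch with the psABI constant values written out; rustc itself uses the matching constants from the `object` crate's `elf` module:

    // Constant values follow the riscv-elf-psabi-doc.
    const EF_RISCV_RVC: u32 = 0x0001;
    const EF_RISCV_FLOAT_ABI_SOFT: u32 = 0x0000;
    const EF_RISCV_FLOAT_ABI_SINGLE: u32 = 0x0002;
    const EF_RISCV_FLOAT_ABI_DOUBLE: u32 = 0x0004;

    fn riscv_e_flags(features: &str) -> u32 {
        let mut e_flags: u32 = 0;

        // Compressed-instruction extension.
        if features.contains("+c") {
            e_flags |= EF_RISCV_RVC;
        }

        // Exactly one float ABI, chosen from the enabled FP extensions.
        if features.contains("+d") {
            e_flags |= EF_RISCV_FLOAT_ABI_DOUBLE;
        } else if features.contains("+f") {
            e_flags |= EF_RISCV_FLOAT_ABI_SINGLE;
        } else {
            e_flags |= EF_RISCV_FLOAT_ABI_SOFT;
        }

        e_flags
    }

    fn main() {
        // A riscv64gc-style feature string: compressed + double-float ABI.
        assert_eq!(riscv_e_flags("+m,+a,+f,+d,+c"), EF_RISCV_RVC | EF_RISCV_FLOAT_ABI_DOUBLE);
        // Soft-float, no compressed instructions.
        assert_eq!(riscv_e_flags(""), EF_RISCV_FLOAT_ABI_SOFT);
    }
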
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 22f534d90..57a99e74c 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -163,21 +163,25 @@ fn is_reachable_non_generic_provider_extern(tcx: TyCtxt<'_>, def_id: DefId) -> b
tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
}
-fn exported_symbols_provider_local<'tcx>(
- tcx: TyCtxt<'tcx>,
+fn exported_symbols_provider_local(
+ tcx: TyCtxt<'_>,
cnum: CrateNum,
-) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+) -> &[(ExportedSymbol<'_>, SymbolExportInfo)] {
assert_eq!(cnum, LOCAL_CRATE);
if !tcx.sess.opts.output_types.should_codegen() {
return &[];
}
- let mut symbols: Vec<_> = tcx
- .reachable_non_generics(LOCAL_CRATE)
- .iter()
- .map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info))
- .collect();
+ // FIXME: Sorting this is unnecessary since we are sorting later anyway.
+ // Can we skip the later sorting?
+ let mut symbols: Vec<_> = tcx.with_stable_hashing_context(|hcx| {
+ tcx.reachable_non_generics(LOCAL_CRATE)
+ .to_sorted(&hcx, true)
+ .into_iter()
+ .map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info))
+ .collect()
+ });
if tcx.entry_fn(()).is_some() {
let exported_symbol =
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 12fca6496..9f1614af7 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -105,7 +105,7 @@ pub struct ModuleConfig {
pub emit_thin_lto: bool,
pub bc_cmdline: String,
- // Miscellaneous flags. These are mostly copied from command-line
+ // Miscellaneous flags. These are mostly copied from command-line
// options.
pub verify_llvm_ir: bool,
pub no_prepopulate_passes: bool,
@@ -538,7 +538,7 @@ fn produce_final_output_artifacts(
let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
if compiled_modules.modules.len() == 1 {
- // 1) Only one codegen unit. In this case it's no difficulty
+ // 1) Only one codegen unit. In this case it's no difficulty
// to copy `foo.0.x` to `foo.x`.
let module_name = Some(&compiled_modules.modules[0].name[..]);
let path = crate_output.temp_path(output_type, module_name);
@@ -557,15 +557,15 @@ fn produce_final_output_artifacts(
.to_owned();
if crate_output.outputs.contains_key(&output_type) {
- // 2) Multiple codegen units, with `--emit foo=some_name`. We have
+ // 2) Multiple codegen units, with `--emit foo=some_name`. We have
// no good solution for this case, so warn the user.
sess.emit_warning(errors::IgnoringEmitPath { extension });
} else if crate_output.single_output_file.is_some() {
- // 3) Multiple codegen units, with `-o some_name`. We have
+ // 3) Multiple codegen units, with `-o some_name`. We have
// no good solution for this case, so warn the user.
sess.emit_warning(errors::IgnoringOutput { extension });
} else {
- // 4) Multiple codegen units, but no explicit name. We
+ // 4) Multiple codegen units, but no explicit name. We
// just leave the `foo.0.x` files in place.
// (We don't have to do any work in this case.)
}
@@ -579,7 +579,7 @@ fn produce_final_output_artifacts(
match *output_type {
OutputType::Bitcode => {
user_wants_bitcode = true;
- // Copy to .bc, but always keep the .0.bc. There is a later
+ // Copy to .bc, but always keep the .0.bc. There is a later
// check to figure out if we should delete .0.bc files, or keep
// them for making an rlib.
copy_if_one_unit(OutputType::Bitcode, true);
@@ -611,7 +611,7 @@ fn produce_final_output_artifacts(
// `-C save-temps` or `--emit=` flags).
if !sess.opts.cg.save_temps {
- // Remove the temporary .#module-name#.o objects. If the user didn't
+ // Remove the temporary .#module-name#.o objects. If the user didn't
// explicitly request bitcode (with --emit=bc), and the bitcode is not
// needed for building an rlib, then we must remove .#module-name#.bc as
// well.
@@ -1002,7 +1002,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
let sess = tcx.sess;
let mut each_linked_rlib_for_lto = Vec::new();
- drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| {
+ drop(link::each_linked_rlib(crate_info, None, &mut |cnum, path| {
if link::ignored_for_lto(sess, crate_info, cnum) {
return;
}
@@ -1098,7 +1098,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
// There are a few environmental pre-conditions that shape how the system
// is set up:
//
- // - Error reporting only can happen on the main thread because that's the
+ // - Error reporting can only happen on the main thread because that's the
// only place where we have access to the compiler `Session`.
// - LLVM work can be done on any thread.
// - Codegen can only happen on the main thread.
@@ -1110,16 +1110,16 @@ fn start_executing_work<B: ExtraBackendMethods>(
// Error Reporting
// ===============
// The error reporting restriction is handled separately from the rest: We
- // set up a `SharedEmitter` the holds an open channel to the main thread.
+ // set up a `SharedEmitter` that holds an open channel to the main thread.
// When an error occurs on any thread, the shared emitter will send the
// error message to the receiver main thread (`SharedEmitterMain`). The
// main thread will periodically query this error message queue and emit
// any error messages it has received. It might even abort compilation if
- // has received a fatal error. In this case we rely on all other threads
+ // it has received a fatal error. In this case we rely on all other threads
// being torn down automatically with the main thread.
// Since the main thread will often be busy doing codegen work, error
// reporting will be somewhat delayed, since the message queue can only be
- // checked in between to work packages.
+ // checked in between two work packages.
//
// Work Processing Infrastructure
// ==============================
@@ -1133,7 +1133,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
// thread about what work to do when, and it will spawn off LLVM worker
// threads as open LLVM WorkItems become available.
//
- // The job of the main thread is to codegen CGUs into LLVM work package
+ // The job of the main thread is to codegen CGUs into LLVM work packages
// (since the main thread is the only thread that can do this). The main
// thread will block until it receives a message from the coordinator, upon
// which it will codegen one CGU, send it to the coordinator and block
@@ -1142,10 +1142,10 @@ fn start_executing_work<B: ExtraBackendMethods>(
//
// The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
// available, it will spawn off a new LLVM worker thread and let it process
- // that a WorkItem. When a LLVM worker thread is done with its WorkItem,
+ // a WorkItem. When a LLVM worker thread is done with its WorkItem,
// it will just shut down, which also frees all resources associated with
// the given LLVM module, and sends a message to the coordinator that the
- // has been completed.
+ // WorkItem has been completed.
//
// Work Scheduling
// ===============
@@ -1165,7 +1165,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
//
// Doing LLVM Work on the Main Thread
// ----------------------------------
- // Since the main thread owns the compiler processes implicit `Token`, it is
+ // Since the main thread owns the compiler process's implicit `Token`, it is
// wasteful to keep it blocked without doing any work. Therefore, what we do
// in this case is: We spawn off an additional LLVM worker thread that helps
// reduce the queue. The work it is doing corresponds to the implicit
@@ -1216,7 +1216,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
// ------------------------------
//
// The final job the coordinator thread is responsible for is managing LTO
- // and how that works. When LTO is requested what we'll to is collect all
+ // and how that works. When LTO is requested what we'll do is collect all
// optimized LLVM modules into a local vector on the coordinator. Once all
// modules have been codegened and optimized we hand this to the `lto`
// module for further optimization. The `lto` module will return back a list
@@ -1899,7 +1899,7 @@ impl<B: ExtraBackendMethods> OngoingCodegen<B> {
// FIXME: time_llvm_passes support - does this use a global context or
// something?
- if sess.codegen_units() == 1 && sess.time_llvm_passes() {
+ if sess.codegen_units() == 1 && sess.opts.unstable_opts.time_llvm_passes {
self.backend.print_pass_timings()
}
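
Most of the write.rs hunks polish the long comment describing the codegen coordinator: diagnostics raised on worker threads are pushed into a channel held by a `SharedEmitter` and drained on the main thread between work packages, which may abort compilation on a fatal error. A generic sketch of that error-forwarding pattern using std mpsc channels, with illustrative names rather than rustc's actual types:

    use std::sync::mpsc;
    use std::thread;

    enum Message {
        Diagnostic(String),
        Fatal(String),
    }

    fn main() {
        let (tx, rx) = mpsc::channel::<Message>();

        // "LLVM worker" threads report diagnostics through the shared sender.
        let workers: Vec<_> = (0..3)
            .map(|i| {
                let tx = tx.clone();
                thread::spawn(move || {
                    let _ = tx.send(Message::Diagnostic(format!("worker {i}: optimized module")));
                    if i == 2 {
                        let _ = tx.send(Message::Fatal(format!("worker {i}: hit a fatal error")));
                    }
                })
            })
            .collect();
        drop(tx); // the "main thread" keeps only the receiving end

        // Main thread: drain the queue between work packages; stop on a fatal error.
        for msg in rx {
            match msg {
                Message::Diagnostic(text) => println!("note: {text}"),
                Message::Fatal(text) => {
                    eprintln!("error: {text}");
                    break; // rustc would abort compilation here
                }
            }
        }

        for worker in workers {
            let _ = worker.join();
        }
    }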