Diffstat (limited to 'compiler/rustc_codegen_ssa')
-rw-r--r--  compiler/rustc_codegen_ssa/messages.ftl (renamed from compiler/rustc_codegen_ssa/locales/en-US.ftl)  |    2
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs          |   28
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs        |  180
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/metadata.rs      |   22
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/symbol_export.rs |   69
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs         |    9
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs               |   56
-rw-r--r--  compiler/rustc_codegen_ssa/src/codegen_attrs.rs      |  625
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/map.rs   |    4
-rw-r--r--  compiler/rustc_codegen_ssa/src/errors.rs             |    4
-rw-r--r--  compiler/rustc_codegen_ssa/src/glue.rs               |    2
-rw-r--r--  compiler/rustc_codegen_ssa/src/lib.rs                |    5
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs        |   21
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs          |  356
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs      |   49
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs            |   25
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs        |   51
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs          |   97
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs         |  274
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs      |    7
-rw-r--r--  compiler/rustc_codegen_ssa/src/target_features.rs    |   12
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/consts.rs      |    1
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/type_.rs       |   11
23 files changed, 1140 insertions, 770 deletions
diff --git a/compiler/rustc_codegen_ssa/locales/en-US.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index 8fe5f8d50..243be0e1f 100644
--- a/compiler/rustc_codegen_ssa/locales/en-US.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -141,7 +141,7 @@ codegen_ssa_msvc_missing_linker = the msvc targets depend on the msvc linker but
codegen_ssa_check_installed_visual_studio = please ensure that Visual Studio 2017 or later, or Build Tools for Visual Studio were installed with the Visual C++ option.
-codegen_ssa_unsufficient_vs_code_product = VS Code is a different product, and is not sufficient.
+codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.
codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status}
.note = {$output}
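Note on the rename above: in rustc's Fluent-based diagnostics, the slug in the `.ftl` file must match the slug named by the corresponding `#[diag(...)]` derive, so this typo fix has a matching `errors.rs` change further down in this diff. The pairing, shown together:

```rust
// messages.ftl entry (Fluent syntax, not Rust):
//   codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.

use rustc_macros::Diagnostic;

// The slug in `#[diag(...)]` must name an existing Fluent message;
// renaming one side without the other fails at compile time.
#[derive(Diagnostic)]
#[diag(codegen_ssa_insufficient_vs_code_product)]
pub struct InsufficientVSCodeProduct;
```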
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index 8bb143ed3..eecfe13bb 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -349,7 +349,10 @@ fn link_rlib<'a>(
let NativeLibKind::Static { bundle: None | Some(true), whole_archive } = lib.kind else {
continue;
};
- if whole_archive == Some(true) && !codegen_results.crate_info.feature_packed_bundled_libs {
+ if whole_archive == Some(true)
+ && flavor == RlibFlavor::Normal
+ && !codegen_results.crate_info.feature_packed_bundled_libs
+ {
sess.emit_err(errors::IncompatibleLinkingModifiers);
}
if flavor == RlibFlavor::Normal && let Some(filename) = lib.filename {
@@ -358,9 +361,9 @@ fn link_rlib<'a>(
let (data, _) = create_wrapper_file(sess, b".bundled_lib".to_vec(), &src);
let wrapper_file = emit_wrapper_file(sess, &data, tmpdir, filename.as_str());
packed_bundled_libs.push(wrapper_file);
- } else if let Some(name) = lib.name {
+ } else {
let path =
- find_native_static_library(name.as_str(), lib.verbatim, &lib_search_paths, sess);
+ find_native_static_library(lib.name.as_str(), lib.verbatim, &lib_search_paths, sess);
ab.add_archive(&path, Box::new(|_| false)).unwrap_or_else(|error| {
sess.emit_fatal(errors::AddNativeLibrary { library_path: path, error })});
}
@@ -436,7 +439,7 @@ fn collate_raw_dylibs<'a, 'b>(
for lib in used_libraries {
if lib.kind == NativeLibKind::RawDylib {
let ext = if lib.verbatim { "" } else { ".dll" };
- let name = format!("{}{}", lib.name.expect("unnamed raw-dylib library"), ext);
+ let name = format!("{}{}", lib.name, ext);
let imports = dylib_table.entry(name.clone()).or_default();
for import in &lib.dll_imports {
if let Some(old_import) = imports.insert(import.name, import) {
@@ -923,7 +926,7 @@ fn link_natively<'a>(
if sess.target.is_like_msvc && linker_not_found {
sess.emit_note(errors::MsvcMissingLinker);
sess.emit_note(errors::CheckInstalledVisualStudio);
- sess.emit_note(errors::UnsufficientVSCodeProduct);
+ sess.emit_note(errors::InsufficientVSCodeProduct);
}
sess.abort_if_errors();
}
@@ -1296,7 +1299,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
.iter()
.filter(|l| relevant_lib(sess, l))
.filter_map(|lib| {
- let name = lib.name?;
+ let name = lib.name;
match lib.kind {
NativeLibKind::Static { bundle: Some(false), .. }
| NativeLibKind::Dylib { .. }
@@ -1317,6 +1320,7 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
// These are included, no need to print them
NativeLibKind::Static { bundle: None | Some(true), .. }
| NativeLibKind::LinkArg
+ | NativeLibKind::WasmImportModule
| NativeLibKind::RawDylib => None,
}
})
@@ -2275,21 +2279,18 @@ fn add_native_libs_from_crate(
let mut last = (None, NativeLibKind::Unspecified, false);
for lib in native_libs {
- let Some(name) = lib.name else {
- continue;
- };
if !relevant_lib(sess, lib) {
continue;
}
// Skip if this library is the same as the last.
- last = if (lib.name, lib.kind, lib.verbatim) == last {
+ last = if (Some(lib.name), lib.kind, lib.verbatim) == last {
continue;
} else {
- (lib.name, lib.kind, lib.verbatim)
+ (Some(lib.name), lib.kind, lib.verbatim)
};
- let name = name.as_str();
+ let name = lib.name.as_str();
let verbatim = lib.verbatim;
match lib.kind {
NativeLibKind::Static { bundle, whole_archive } => {
@@ -2303,7 +2304,7 @@ fn add_native_libs_from_crate(
|| (whole_archive == None
&& bundle
&& cnum == LOCAL_CRATE
- && sess.opts.test);
+ && sess.is_test_crate());
if bundle && cnum != LOCAL_CRATE {
if let Some(filename) = lib.filename {
@@ -2346,6 +2347,7 @@ fn add_native_libs_from_crate(
NativeLibKind::RawDylib => {
// Handled separately in `linker_with_args`.
}
+ NativeLibKind::WasmImportModule => {}
NativeLibKind::LinkArg => {
if link_static {
cmd.arg(name);
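Most of the mechanical churn in this file comes from `NativeLib::name` changing from `Option<Symbol>` to `Symbol` (see the `src/lib.rs` hunk below), which removes the `expect`/`let-else` handling at every use site. A simplified sketch of the pattern being removed, with `String` standing in for `Symbol`:

```rust
// Before: every consumer had to unwrap or skip unnamed libraries.
struct NativeLibOld { name: Option<String> }
fn lib_name_old(lib: &NativeLibOld) -> Option<&str> {
    let name = lib.name.as_deref()?; // or `let Some(name) = lib.name else { continue }`
    Some(name)
}

// After: the invariant "every native lib has a name" lives in the type.
struct NativeLibNew { name: String }
fn lib_name_new(lib: &NativeLibNew) -> &str {
    &lib.name
}

fn main() {
    let lib = NativeLibNew { name: "z".to_string() };
    println!("-l{}", lib_name_new(&lib));
}
```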
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
index 52c01b423..65dfc325a 100644
--- a/compiler/rustc_codegen_ssa/src/back/linker.rs
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -133,6 +133,9 @@ pub fn get_linker<'a>(
LinkerFlavor::Unix(Cc::No) if sess.target.os == "l4re" => {
Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>
}
+ LinkerFlavor::Unix(Cc::No) if sess.target.os == "aix" => {
+ Box::new(AixLinker::new(cmd, sess)) as Box<dyn Linker>
+ }
LinkerFlavor::WasmLld(Cc::No) => Box::new(WasmLd::new(cmd, sess)) as Box<dyn Linker>,
LinkerFlavor::Gnu(cc, _)
| LinkerFlavor::Darwin(cc, _)
@@ -720,6 +723,7 @@ impl<'a> Linker for GccLinker<'a> {
let mut arg = OsString::from("--version-script=");
arg.push(path);
self.linker_arg(arg);
+ self.linker_arg("--no-undefined-version");
}
}
}
@@ -1117,9 +1121,12 @@ impl<'a> Linker for EmLinker<'a> {
fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
// Preserve names or generate source maps depending on debug info
+ // For more information see https://emscripten.org/docs/tools_reference/emcc.html#emcc-g
self.cmd.arg(match self.sess.opts.debuginfo {
DebugInfo::None => "-g0",
- DebugInfo::Limited => "--profiling-funcs",
+ DebugInfo::Limited | DebugInfo::LineTablesOnly | DebugInfo::LineDirectivesOnly => {
+ "--profiling-funcs"
+ }
DebugInfo::Full => "-g",
});
}
@@ -1473,6 +1480,177 @@ impl<'a> L4Bender<'a> {
}
}
+/// Linker for AIX.
+pub struct AixLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+ hinted_static: bool,
+}
+
+impl<'a> AixLinker<'a> {
+ pub fn new(cmd: Command, sess: &'a Session) -> AixLinker<'a> {
+ AixLinker { cmd, sess, hinted_static: false }
+ }
+
+ fn hint_static(&mut self) {
+ if !self.hinted_static {
+ self.cmd.arg("-bstatic");
+ self.hinted_static = true;
+ }
+ }
+
+ fn hint_dynamic(&mut self) {
+ if self.hinted_static {
+ self.cmd.arg("-bdynamic");
+ self.hinted_static = false;
+ }
+ }
+
+ fn build_dylib(&mut self, _out_filename: &Path) {
+ self.cmd.arg("-bM:SRE");
+ self.cmd.arg("-bnoentry");
+ // FIXME: Use CreateExportList utility to create export list
+ // and remove -bexpfull.
+ self.cmd.arg("-bexpfull");
+ }
+}
+
+impl<'a> Linker for AixLinker<'a> {
+ fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
+ self.hint_dynamic();
+ self.cmd.arg(format!("-l{}", lib));
+ }
+
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.hint_static();
+ self.cmd.arg(format!("-l{}", lib));
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(lib);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn framework_path(&mut self, _: &Path) {
+ bug!("frameworks are not supported on AIX");
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicDylib => {
+ self.hint_dynamic();
+ self.build_dylib(out_filename);
+ }
+ LinkOutputKind::StaticDylib => {
+ self.hint_static();
+ self.build_dylib(out_filename);
+ }
+ _ => {}
+ }
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _: &Path) {
+ self.hint_dynamic();
+ self.cmd.arg(format!("-l{}", lib));
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks not supported on AIX");
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]) {
+ self.hint_static();
+ let lib = find_native_static_library(lib, verbatim, search_path, &self.sess);
+ self.cmd.arg(format!("-bkeepfile:{}", lib.to_str().unwrap()));
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(format!("-bkeepfile:{}", lib.to_str().unwrap()));
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ self.cmd.arg("-bgc");
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("-bnogc");
+ }
+
+ fn optimize(&mut self) {}
+
+ fn pgo_gen(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ match strip {
+ Strip::None => {}
+ // FIXME: -s strips the symbol table, line number information
+ // and relocation information.
+ Strip::Debuginfo | Strip::Symbols => {
+ self.cmd.arg("-s");
+ }
+ }
+ }
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn export_symbols(&mut self, tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ let path = tmpdir.join("list.exp");
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ // FIXME: use llvm-nm to generate export list.
+ for symbol in symbols {
+ debug!(" _{}", symbol);
+ writeln!(f, " {}", symbol)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write export file: {}", e));
+ }
+ self.cmd.arg(format!("-bE:{}", path.to_str().unwrap()));
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ fn reset_per_library_state(&mut self) {
+ self.hint_dynamic();
+ }
+
+ fn linker_plugin_lto(&mut self) {}
+
+ fn add_eh_frame_header(&mut self) {}
+
+ fn add_no_exec(&mut self) {}
+
+ fn add_as_needed(&mut self) {}
+}
+
fn for_each_exported_symbols_include_dep<'tcx>(
tcx: TyCtxt<'tcx>,
crate_type: CrateType,
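The new `AixLinker` follows the same pattern as `L4Bender` just above it: one struct per linker family, all implementing the `Linker` trait and selected as a boxed trait object in `get_linker` by flavor and OS. A reduced sketch of that dispatch shape, with simplified names rather than the real rustc types:

```rust
trait Linker {
    fn link_dylib(&mut self, lib: &str);
}

struct AixLinker { args: Vec<String>, hinted_static: bool }

impl AixLinker {
    // Emit `-bstatic`/`-bdynamic` only on state transitions, mirroring
    // the `hint_static`/`hint_dynamic` pair in the real implementation.
    fn hint_dynamic(&mut self) {
        if self.hinted_static {
            self.args.push("-bdynamic".into());
            self.hinted_static = false;
        }
    }
}

impl Linker for AixLinker {
    fn link_dylib(&mut self, lib: &str) {
        self.hint_dynamic();
        self.args.push(format!("-l{lib}"));
    }
}

fn get_linker(os: &str) -> Box<dyn Linker> {
    match os {
        "aix" => Box::new(AixLinker { args: vec![], hinted_static: false }),
        other => unimplemented!("no sketch for {other}"),
    }
}

fn main() {
    let mut linker = get_linker("aix");
    linker.link_dylib("c");
}
```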
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 019ec0758..d5d843702 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -13,8 +13,7 @@ use object::{
use snap::write::FrameEncoder;
use rustc_data_structures::memmap::Mmap;
-use rustc_data_structures::owning_ref::OwningRef;
-use rustc_data_structures::rustc_erase_owner;
+use rustc_data_structures::owned_slice::try_slice_owned;
use rustc_data_structures::sync::MetadataRef;
use rustc_metadata::fs::METADATA_FILENAME;
use rustc_metadata::EncodedMetadata;
@@ -42,10 +41,10 @@ fn load_metadata_with(
) -> Result<MetadataRef, String> {
let file =
File::open(path).map_err(|e| format!("failed to open file '{}': {}", path.display(), e))?;
- let data = unsafe { Mmap::map(file) }
- .map_err(|e| format!("failed to mmap file '{}': {}", path.display(), e))?;
- let metadata = OwningRef::new(data).try_map(f)?;
- return Ok(rustc_erase_owner!(metadata.map_owner_box()));
+
+ unsafe { Mmap::map(file) }
+ .map_err(|e| format!("failed to mmap file '{}': {}", path.display(), e))
+ .and_then(|mmap| try_slice_owned(mmap, |mmap| f(mmap)))
}
impl MetadataLoader for DefaultMetadataLoader {
@@ -128,6 +127,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
"msp430" => Architecture::Msp430,
"hexagon" => Architecture::Hexagon,
"bpf" => Architecture::Bpf,
+ "loongarch64" => Architecture::LoongArch64,
// Unsupported architecture.
_ => return None,
};
@@ -191,6 +191,10 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
}
e_flags
}
+ Architecture::LoongArch64 => {
+ // Source: https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html#_e_flags_identifies_abi_type_and_version
+ elf::EF_LARCH_OBJABI_V1 | elf::EF_LARCH_ABI_DOUBLE_FLOAT
+ }
_ => 0,
};
// adapted from LLVM's `MCELFObjectTargetWriter::getOSABI`
@@ -306,7 +310,13 @@ pub fn create_compressed_metadata_file(
symbol_name: &str,
) -> Vec<u8> {
let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+ // Our length will be backfilled once we're done writing
+ compressed.write_all(&[0; 4]).unwrap();
FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
+ let meta_len = rustc_metadata::METADATA_HEADER.len();
+ let data_len = (compressed.len() - meta_len - 4) as u32;
+ compressed[meta_len..meta_len + 4].copy_from_slice(&data_len.to_be_bytes());
+
let Some(mut file) = create_object_file(sess) else {
return compressed.to_vec();
};
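The `create_compressed_metadata_file` change above reserves four zero bytes after the header, streams the snap-compressed payload, then backfills the payload length in big-endian order. The same reserve-then-patch pattern in isolation (plain bytes standing in for the snap `FrameEncoder`):

```rust
use std::io::Write;

// Frame layout: [header][u32 be payload len][payload]
fn frame(header: &[u8], payload: &[u8]) -> Vec<u8> {
    let mut buf = header.to_vec();
    buf.write_all(&[0; 4]).unwrap(); // placeholder, backfilled below
    buf.write_all(payload).unwrap(); // stand-in for the compressed stream
    let len = (buf.len() - header.len() - 4) as u32;
    buf[header.len()..header.len() + 4].copy_from_slice(&len.to_be_bytes());
    buf
}

fn main() {
    let f = frame(b"HDR", b"hello");
    assert_eq!(&f[3..7], &5u32.to_be_bytes());
}
```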
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 067a3e167..d0fd3cd76 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -1,3 +1,5 @@
+use crate::base::allocator_kind_for_codegen;
+
use std::collections::hash_map::Entry::*;
use rustc_ast::expand::allocator::ALLOCATOR_METHODS;
@@ -8,10 +10,11 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::exported_symbols::{
metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
};
+use rustc_middle::query::LocalCrate;
use rustc_middle::ty::query::{ExternProviders, Providers};
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::Instance;
-use rustc_middle::ty::{self, DefIdTree, SymbolName, TyCtxt};
+use rustc_middle::ty::{self, SymbolName, TyCtxt};
use rustc_session::config::{CrateType, OomStrategy};
use rustc_target::spec::SanitizerSet;
@@ -39,9 +42,7 @@ pub fn crates_export_threshold(crate_types: &[CrateType]) -> SymbolExportLevel {
}
}
-fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<SymbolExportInfo> {
- assert_eq!(cnum, LOCAL_CRATE);
-
+fn reachable_non_generics_provider(tcx: TyCtxt<'_>, _: LocalCrate) -> DefIdMap<SymbolExportInfo> {
if !tcx.sess.opts.output_types.should_codegen() {
return Default::default();
}
@@ -58,7 +59,7 @@ fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<
let mut reachable_non_generics: DefIdMap<_> = tcx
.reachable_set(())
- .iter()
+ .items()
.filter_map(|&def_id| {
// We want to ignore some FFI functions that are not exposed from
// this crate. Reachable FFI functions can be lumped into two
@@ -136,7 +137,7 @@ fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<
};
(def_id.to_def_id(), info)
})
- .collect();
+ .into();
if let Some(id) = tcx.proc_macro_decls_static(()) {
reachable_non_generics.insert(
@@ -152,10 +153,10 @@ fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<
reachable_non_generics
}
-fn is_reachable_non_generic_provider_local(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+fn is_reachable_non_generic_provider_local(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
let export_threshold = threshold(tcx);
- if let Some(&info) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+ if let Some(&info) = tcx.reachable_non_generics(LOCAL_CRATE).get(&def_id.to_def_id()) {
info.level.is_below_threshold(export_threshold)
} else {
false
@@ -168,24 +169,37 @@ fn is_reachable_non_generic_provider_extern(tcx: TyCtxt<'_>, def_id: DefId) -> b
fn exported_symbols_provider_local(
tcx: TyCtxt<'_>,
- cnum: CrateNum,
+ _: LocalCrate,
) -> &[(ExportedSymbol<'_>, SymbolExportInfo)] {
- assert_eq!(cnum, LOCAL_CRATE);
-
if !tcx.sess.opts.output_types.should_codegen() {
return &[];
}
// FIXME: Sorting this is unnecessary since we are sorting later anyway.
// Can we skip the later sorting?
- let mut symbols: Vec<_> = tcx.with_stable_hashing_context(|hcx| {
- tcx.reachable_non_generics(LOCAL_CRATE)
- .to_sorted(&hcx, true)
- .into_iter()
- .map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info))
- .collect()
+ let sorted = tcx.with_stable_hashing_context(|hcx| {
+ tcx.reachable_non_generics(LOCAL_CRATE).to_sorted(&hcx, true)
});
+ let mut symbols: Vec<_> =
+ sorted.iter().map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info)).collect();
+
+ // Export TLS shims
+ if !tcx.sess.target.dll_tls_export {
+ symbols.extend(sorted.iter().filter_map(|(&def_id, &info)| {
+ tcx.needs_thread_local_shim(def_id).then(|| {
+ (
+ ExportedSymbol::ThreadLocalShim(def_id),
+ SymbolExportInfo {
+ level: info.level,
+ kind: SymbolExportKind::Text,
+ used: info.used,
+ },
+ )
+ })
+ }))
+ }
+
if tcx.entry_fn(()).is_some() {
let exported_symbol =
ExportedSymbol::NoDefId(SymbolName::new(tcx, tcx.sess.target.entry_name.as_ref()));
@@ -200,7 +214,8 @@ fn exported_symbols_provider_local(
));
}
- if tcx.allocator_kind(()).is_some() {
+ // Mark allocator shim symbols as exported only if they were generated.
+ if allocator_kind_for_codegen(tcx).is_some() {
for symbol_name in ALLOCATOR_METHODS
.iter()
.map(|method| format!("__rust_{}", method.name))
@@ -380,7 +395,9 @@ fn upstream_monomorphizations_provider(
continue;
}
}
- ExportedSymbol::NonGeneric(..) | ExportedSymbol::NoDefId(..) => {
+ ExportedSymbol::NonGeneric(..)
+ | ExportedSymbol::ThreadLocalShim(..)
+ | ExportedSymbol::NoDefId(..) => {
// These are no monomorphizations
continue;
}
@@ -500,6 +517,16 @@ pub fn symbol_name_for_instance_in_crate<'tcx>(
instantiating_crate,
)
}
+ ExportedSymbol::ThreadLocalShim(def_id) => {
+ rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ ty::Instance {
+ def: ty::InstanceDef::ThreadLocalShim(def_id),
+ substs: ty::InternalSubsts::empty(),
+ },
+ instantiating_crate,
+ )
+ }
ExportedSymbol::DropGlue(ty) => rustc_symbol_mangling::symbol_name_for_instance_in_crate(
tcx,
Instance::resolve_drop_in_place(tcx, ty),
@@ -548,6 +575,8 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
ExportedSymbol::DropGlue(..) => None,
// NoDefId always follow the target's default symbol decoration scheme.
ExportedSymbol::NoDefId(..) => None,
+ // ThreadLocalShim always follow the target's default symbol decoration scheme.
+ ExportedSymbol::ThreadLocalShim(..) => None,
};
let (conv, args) = instance
@@ -592,7 +621,7 @@ fn wasm_import_module_map(tcx: TyCtxt<'_>, cnum: CrateNum) -> FxHashMap<DefId, S
let mut ret = FxHashMap::default();
for (def_id, lib) in tcx.foreign_modules(cnum).iter() {
- let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module);
+ let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module());
let Some(module) = module else { continue };
ret.extend(lib.foreign_items.iter().map(|id| {
assert_eq!(id.krate, cnum);
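The thread-local-shim export added above leans on `bool::then` inside a `filter_map` to keep only the symbols that need a shim. The combinator shape, stripped of the rustc types:

```rust
fn main() {
    let symbols = [("a", true), ("b", false), ("c", true)];
    // `bool::then` turns `true` into `Some(..)` and `false` into `None`,
    // so `filter_map` keeps exactly the shim-needing symbols.
    let shims: Vec<String> = symbols
        .iter()
        .filter_map(|&(name, needs_shim)| needs_shim.then(|| format!("{name}::tls_shim")))
        .collect();
    assert_eq!(shims, ["a::tls_shim", "c::tls_shim"]);
}
```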
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 8508ab875..2dda4cd16 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -8,6 +8,7 @@ use crate::{
CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
};
use jobserver::{Acquired, Client};
+use rustc_ast::attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::profiling::SelfProfilerRef;
@@ -447,8 +448,8 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
let sess = tcx.sess;
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
- let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
- let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
+ let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins);
+ let is_compiler_builtins = attr::contains_name(crate_attrs, sym::compiler_builtins);
let crate_info = CrateInfo::new(tcx, target_cpu);
@@ -1451,8 +1452,8 @@ fn start_executing_work<B: ExtraBackendMethods>(
Err(e) => {
let msg = &format!("failed to acquire jobserver token: {}", e);
shared_emitter.fatal(msg);
- // Exit the coordinator thread
- panic!("{}", msg)
+ codegen_done = true;
+ codegen_aborted = true;
}
}
}
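The jobserver change above swaps a coordinator-thread panic for the same flag pair used on the normal abort path, so the main loop drains in-flight work instead of poisoning the thread. A schematic of that loop shape; the flag names come from the diff, but the loop body is invented for illustration:

```rust
fn coordinator(mut pending: Vec<&'static str>) {
    let mut codegen_done = false;
    let mut codegen_aborted = false;
    let mut in_flight = 0usize;
    loop {
        // On a fatal setup error we stop dispatching new work but still
        // let already-spawned work drain, instead of panicking mid-loop.
        if simulate_token_failure() {
            codegen_done = true;
            codegen_aborted = true;
        }
        if !codegen_aborted {
            if let Some(item) = pending.pop() {
                in_flight += 1;
                println!("spawn {item}");
            } else {
                codegen_done = true;
            }
        }
        if in_flight > 0 {
            in_flight -= 1; // pretend one work item finished
        }
        if codegen_done && in_flight == 0 {
            break;
        }
    }
}

fn simulate_token_failure() -> bool { false } // hypothetical stand-in

fn main() { coordinator(vec!["cgu.0", "cgu.1"]); }
```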
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 73179249b..c5ca7936a 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -13,6 +13,7 @@ use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
+use rustc_ast::expand::allocator::AllocatorKind;
use rustc_attr as attr;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
@@ -23,7 +24,6 @@ use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
-use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
@@ -39,7 +39,7 @@ use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::Symbol;
use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
-use rustc_target::abi::{Align, VariantIdx};
+use rustc_target::abi::{Align, FIRST_VARIANT};
use std::collections::BTreeSet;
use std::time::{Duration, Instant};
@@ -306,9 +306,9 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
- for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
- let src_f = src.project_field(bx, i);
- let dst_f = dst.project_field(bx, i);
+ for i in def_a.variant(FIRST_VARIANT).fields.indices() {
+ let src_f = src.project_field(bx, i.as_usize());
+ let dst_f = dst.project_field(bx, i.as_usize());
if dst_f.layout.is_zst() {
continue;
@@ -545,6 +545,23 @@ pub fn collect_debugger_visualizers_transitive(
.collect::<BTreeSet<_>>()
}
+/// Decide allocator kind to codegen. If `Some(_)` this will be the same as
+/// `tcx.allocator_kind`, but it may be `None` in more cases (e.g. if using
+/// allocator definitions from a dylib dependency).
+pub fn allocator_kind_for_codegen(tcx: TyCtxt<'_>) -> Option<AllocatorKind> {
+ // If the crate doesn't have an `allocator_kind` set then there's definitely
+ // no shim to generate. Otherwise we also check our dependency graph for all
+ // our output crate types. If anything there looks like it's a `Dynamic`
+ // linkage, then it's already got an allocator shim and we'll be using that
+ // one instead. If nothing exists then it's our job to generate the
+ // allocator!
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate { None } else { tcx.allocator_kind(()) }
+}
+
pub fn codegen_crate<B: ExtraBackendMethods>(
backend: B,
tcx: TyCtxt<'_>,
@@ -615,20 +632,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
);
// Codegen an allocator shim, if necessary.
- //
- // If the crate doesn't have an `allocator_kind` set then there's definitely
- // no shim to generate. Otherwise we also check our dependency graph for all
- // our output crate types. If anything there looks like it's a `Dynamic`
- // linkage, then it's already got an allocator shim and we'll be using that
- // one instead. If nothing exists then it's our job to generate the
- // allocator!
- let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
- use rustc_middle::middle::dependency_format::Linkage;
- list.iter().any(|&linkage| linkage == Linkage::Dynamic)
- });
- let allocator_module = if any_dynamic_crate {
- None
- } else if let Some(kind) = tcx.allocator_kind(()) {
+ if let Some(kind) = allocator_kind_for_codegen(tcx) {
let llmod_id =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
let module_llvm = tcx.sess.time("write_allocator_module", || {
@@ -642,13 +646,10 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
)
});
- Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
- } else {
- None
- };
-
- if let Some(allocator_module) = allocator_module {
- ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
+ ongoing_codegen.submit_pre_codegened_module_to_llvm(
+ tcx,
+ ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator },
+ );
}
// For better throughput during parallel processing by LLVM, we used to sort
@@ -784,6 +785,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
total_codegen_time,
start_rss.unwrap(),
end_rss,
+ tcx.sess.opts.unstable_opts.time_passes_format,
);
}
@@ -807,7 +809,7 @@ impl CrateInfo {
.collect();
let local_crate_name = tcx.crate_name(LOCAL_CRATE);
let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
- let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
+ let subsystem = attr::first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
let windows_subsystem = subsystem.map(|subsystem| {
if subsystem != sym::windows && subsystem != sym::console {
tcx.sess.emit_fatal(errors::InvalidWindowsSubsystem { subsystem });
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index 7d5c00486..8542bab68 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -8,8 +8,9 @@ use rustc_hir::{lang_items, weak_lang_items::WEAK_LANG_ITEMS, LangItem};
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::mono::Linkage;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{self as ty, DefIdTree, TyCtxt};
+use rustc_middle::ty::{self as ty, TyCtxt};
use rustc_session::{lint, parse::feature_err};
+use rustc_span::symbol::Ident;
use rustc_span::{sym, Span};
use rustc_target::spec::{abi, SanitizerSet};
@@ -43,7 +44,7 @@ fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
}
}
-fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
+fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
if cfg!(debug_assertions) {
let def_kind = tcx.def_kind(did);
assert!(
@@ -52,7 +53,6 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
);
}
- let did = did.expect_local();
let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(did));
let mut codegen_fn_attrs = CodegenFnAttrs::new();
if tcx.should_inherit_track_caller(did) {
@@ -61,351 +61,368 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
- // In some cases, attributes are only valid on functions, but it's the `check_attr`
- // pass that checks that they aren't used anywhere else, rather than this module.
- // In these cases, we bail from performing further checks that are only meaningful for
- // functions (such as calling `fn_sig`, which ICEs if given a non-function). We also
- // report a delayed bug, just in case `check_attr` isn't doing its job.
- let validate_fn_only_attr = |attr_sp| -> bool {
- let def_kind = tcx.def_kind(did);
- if let DefKind::Fn | DefKind::AssocFn | DefKind::Variant | DefKind::Ctor(..) = def_kind {
- true
- } else {
- tcx.sess.delay_span_bug(attr_sp, "this attribute can only be applied to functions");
- false
- }
- };
-
let mut inline_span = None;
let mut link_ordinal_span = None;
let mut no_sanitize_span = None;
+
for attr in attrs.iter() {
- if attr.has_name(sym::cold) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
- } else if attr.has_name(sym::rustc_allocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
- } else if attr.has_name(sym::ffi_returns_twice) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE;
- } else if attr.has_name(sym::ffi_pure) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE;
- } else if attr.has_name(sym::ffi_const) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST;
- } else if attr.has_name(sym::rustc_nounwind) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
- } else if attr.has_name(sym::rustc_reallocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR;
- } else if attr.has_name(sym::rustc_deallocator) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR;
- } else if attr.has_name(sym::rustc_allocator_zeroed) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED;
- } else if attr.has_name(sym::naked) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
- } else if attr.has_name(sym::no_mangle) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
- } else if attr.has_name(sym::no_coverage) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
- } else if attr.has_name(sym::rustc_std_internal_symbol) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
- } else if attr.has_name(sym::used) {
- let inner = attr.meta_item_list();
- match inner.as_deref() {
- Some([item]) if item.has_name(sym::linker) => {
- if !tcx.features().used_with_arg {
- feature_err(
- &tcx.sess.parse_sess,
- sym::used_with_arg,
- attr.span,
- "`#[used(linker)]` is currently unstable",
- )
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER;
- }
- Some([item]) if item.has_name(sym::compiler) => {
- if !tcx.features().used_with_arg {
- feature_err(
- &tcx.sess.parse_sess,
- sym::used_with_arg,
+ // In some cases, attributes are only valid on functions, but it's the `check_attr`
+ // pass that checks that they aren't used anywhere else, rather than this module.
+ // In these cases, we bail from performing further checks that are only meaningful for
+ // functions (such as calling `fn_sig`, which ICEs if given a non-function). We also
+ // report a delayed bug, just in case `check_attr` isn't doing its job.
+ let fn_sig = || {
+ use DefKind::*;
+
+ let def_kind = tcx.def_kind(did);
+ if let Fn | AssocFn | Variant | Ctor(..) = def_kind {
+ Some(tcx.fn_sig(did))
+ } else {
+ tcx.sess
+ .delay_span_bug(attr.span, "this attribute can only be applied to functions");
+ None
+ }
+ };
+
+ let Some(Ident { name, .. }) = attr.ident() else {
+ continue;
+ };
+
+ match name {
+ sym::cold => codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD,
+ sym::rustc_allocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR,
+ sym::ffi_returns_twice => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE
+ }
+ sym::ffi_pure => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE,
+ sym::ffi_const => codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST,
+ sym::rustc_nounwind => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND,
+ sym::rustc_reallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR,
+ sym::rustc_deallocator => codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR,
+ sym::rustc_allocator_zeroed => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED
+ }
+ sym::naked => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED,
+ sym::no_mangle => {
+ if tcx.opt_item_name(did.to_def_id()).is_some() {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE
+ } else {
+ tcx.sess
+ .struct_span_err(
attr.span,
- "`#[used(compiler)]` is currently unstable",
+ format!(
+ "`#[no_mangle]` cannot be used on {} {} as it has no name",
+ tcx.def_descr_article(did.to_def_id()),
+ tcx.def_descr(did.to_def_id()),
+ ),
)
.emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
}
- Some(_) => {
- tcx.sess.emit_err(ExpectedUsedSymbol { span: attr.span });
- }
- None => {
- // Unfortunately, unconditionally using `llvm.used` causes
- // issues in handling `.init_array` with the gold linker,
- // but using `llvm.compiler.used` caused a nontrival amount
- // of unintentional ecosystem breakage -- particularly on
- // Mach-O targets.
- //
- // As a result, we emit `llvm.compiler.used` only on ELF
- // targets. This is somewhat ad-hoc, but actually follows
- // our pre-LLVM 13 behavior (prior to the ecosystem
- // breakage), and seems to match `clang`'s behavior as well
- // (both before and after LLVM 13), possibly because they
- // have similar compatibility concerns to us. See
- // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
- // and following comments for some discussion of this, as
- // well as the comments in `rustc_codegen_llvm` where these
- // flags are handled.
- //
- // Anyway, to be clear: this is still up in the air
- // somewhat, and is subject to change in the future (which
- // is a good thing, because this would ideally be a bit
- // more firmed up).
- let is_like_elf = !(tcx.sess.target.is_like_osx
- || tcx.sess.target.is_like_windows
- || tcx.sess.target.is_like_wasm);
- codegen_fn_attrs.flags |= if is_like_elf {
- CodegenFnAttrFlags::USED
- } else {
- CodegenFnAttrFlags::USED_LINKER
- };
- }
- }
- } else if attr.has_name(sym::cmse_nonsecure_entry) {
- if validate_fn_only_attr(attr.span)
- && !matches!(tcx.fn_sig(did).skip_binder().abi(), abi::Abi::C { .. })
- {
- struct_span_err!(
- tcx.sess,
- attr.span,
- E0776,
- "`#[cmse_nonsecure_entry]` requires C ABI"
- )
- .emit();
}
- if !tcx.sess.target.llvm_target.contains("thumbv8m") {
- struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension")
- .emit();
- }
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY;
- } else if attr.has_name(sym::thread_local) {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL;
- } else if attr.has_name(sym::track_caller) {
- if !tcx.is_closure(did.to_def_id())
- && validate_fn_only_attr(attr.span)
- && tcx.fn_sig(did).skip_binder().abi() != abi::Abi::Rust
- {
- struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
- .emit();
+ sym::no_coverage => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE,
+ sym::rustc_std_internal_symbol => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
- if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
- feature_err(
- &tcx.sess.parse_sess,
- sym::closure_track_caller,
- attr.span,
- "`#[track_caller]` on closures is currently unstable",
- )
- .emit();
+ sym::used => {
+ let inner = attr.meta_item_list();
+ match inner.as_deref() {
+ Some([item]) if item.has_name(sym::linker) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(linker)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER;
+ }
+ Some([item]) if item.has_name(sym::compiler) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(compiler)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
+ }
+ Some(_) => {
+ tcx.sess.emit_err(ExpectedUsedSymbol { span: attr.span });
+ }
+ None => {
+ // Unfortunately, unconditionally using `llvm.used` causes
+ // issues in handling `.init_array` with the gold linker,
+ // but using `llvm.compiler.used` caused a nontrivial amount
+ // of unintentional ecosystem breakage -- particularly on
+ // Mach-O targets.
+ //
+ // As a result, we emit `llvm.compiler.used` only on ELF
+ // targets. This is somewhat ad-hoc, but actually follows
+ // our pre-LLVM 13 behavior (prior to the ecosystem
+ // breakage), and seems to match `clang`'s behavior as well
+ // (both before and after LLVM 13), possibly because they
+ // have similar compatibility concerns to us. See
+ // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
+ // and following comments for some discussion of this, as
+ // well as the comments in `rustc_codegen_llvm` where these
+ // flags are handled.
+ //
+ // Anyway, to be clear: this is still up in the air
+ // somewhat, and is subject to change in the future (which
+ // is a good thing, because this would ideally be a bit
+ // more firmed up).
+ let is_like_elf = !(tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ || tcx.sess.target.is_like_wasm);
+ codegen_fn_attrs.flags |= if is_like_elf {
+ CodegenFnAttrFlags::USED
+ } else {
+ CodegenFnAttrFlags::USED_LINKER
+ };
+ }
+ }
}
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
- } else if attr.has_name(sym::export_name) {
- if let Some(s) = attr.value_str() {
- if s.as_str().contains('\0') {
- // `#[export_name = ...]` will be converted to a null-terminated string,
- // so it may not contain any null characters.
+ sym::cmse_nonsecure_entry => {
+ if let Some(fn_sig) = fn_sig()
+ && !matches!(fn_sig.skip_binder().abi(), abi::Abi::C { .. })
+ {
struct_span_err!(
tcx.sess,
attr.span,
- E0648,
- "`export_name` may not contain null characters"
+ E0776,
+ "`#[cmse_nonsecure_entry]` requires C ABI"
)
.emit();
}
- codegen_fn_attrs.export_name = Some(s);
+ if !tcx.sess.target.llvm_target.contains("thumbv8m") {
+ struct_span_err!(tcx.sess, attr.span, E0775, "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension")
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY
}
- } else if attr.has_name(sym::target_feature) {
- if !tcx.is_closure(did.to_def_id())
- && tcx.fn_sig(did).skip_binder().unsafety() == hir::Unsafety::Normal
- {
- if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
- // The `#[target_feature]` attribute is allowed on
- // WebAssembly targets on all functions, including safe
- // ones. Other targets require that `#[target_feature]` is
- // only applied to unsafe functions (pending the
- // `target_feature_11` feature) because on most targets
- // execution of instructions that are not supported is
- // considered undefined behavior. For WebAssembly which is a
- // 100% safe target at execution time it's not possible to
- // execute undefined instructions, and even if a future
- // feature was added in some form for this it would be a
- // deterministic trap. There is no undefined behavior when
- // executing WebAssembly so `#[target_feature]` is allowed
- // on safe functions (but again, only for WebAssembly)
- //
- // Note that this is also allowed if `actually_rustdoc` so
- // if a target is documenting some wasm-specific code then
- // it's not spuriously denied.
- } else if !tcx.features().target_feature_11 {
- let mut err = feature_err(
+ sym::thread_local => codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL,
+ sym::track_caller => {
+ if !tcx.is_closure(did.to_def_id())
+ && let Some(fn_sig) = fn_sig()
+ && fn_sig.skip_binder().abi() != abi::Abi::Rust
+ {
+ struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
+ .emit();
+ }
+ if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
+ feature_err(
&tcx.sess.parse_sess,
- sym::target_feature_11,
+ sym::closure_track_caller,
attr.span,
- "`#[target_feature(..)]` can only be applied to `unsafe` functions",
- );
- err.span_label(tcx.def_span(did), "not an `unsafe` function");
- err.emit();
- } else {
- check_target_feature_trait_unsafe(tcx, did, attr.span);
+ "`#[track_caller]` on closures is currently unstable",
+ )
+ .emit();
}
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER
}
- from_target_feature(
- tcx,
- attr,
- supported_target_features,
- &mut codegen_fn_attrs.target_features,
- );
- } else if attr.has_name(sym::linkage) {
- if let Some(val) = attr.value_str() {
- let linkage = Some(linkage_by_name(tcx, did, val.as_str()));
- if tcx.is_foreign_item(did) {
- codegen_fn_attrs.import_linkage = linkage;
- } else {
- codegen_fn_attrs.linkage = linkage;
+ sym::export_name => {
+ if let Some(s) = attr.value_str() {
+ if s.as_str().contains('\0') {
+ // `#[export_name = ...]` will be converted to a null-terminated string,
+ // so it may not contain any null characters.
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0648,
+ "`export_name` may not contain null characters"
+ )
+ .emit();
+ }
+ codegen_fn_attrs.export_name = Some(s);
}
}
- } else if attr.has_name(sym::link_section) {
- if let Some(val) = attr.value_str() {
- if val.as_str().bytes().any(|b| b == 0) {
- let msg = format!(
- "illegal null byte in link_section \
- value: `{}`",
- &val
- );
- tcx.sess.span_err(attr.span, &msg);
- } else {
- codegen_fn_attrs.link_section = Some(val);
+ sym::target_feature => {
+ if !tcx.is_closure(did.to_def_id())
+ && let Some(fn_sig) = fn_sig()
+ && fn_sig.skip_binder().unsafety() == hir::Unsafety::Normal
+ {
+ if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
+ // The `#[target_feature]` attribute is allowed on
+ // WebAssembly targets on all functions, including safe
+ // ones. Other targets require that `#[target_feature]` is
+ // only applied to unsafe functions (pending the
+ // `target_feature_11` feature) because on most targets
+ // execution of instructions that are not supported is
+ // considered undefined behavior. For WebAssembly which is a
+ // 100% safe target at execution time it's not possible to
+ // execute undefined instructions, and even if a future
+ // feature was added in some form for this it would be a
+ // deterministic trap. There is no undefined behavior when
+ // executing WebAssembly so `#[target_feature]` is allowed
+ // on safe functions (but again, only for WebAssembly)
+ //
+ // Note that this is also allowed if `actually_rustdoc` so
+ // if a target is documenting some wasm-specific code then
+ // it's not spuriously denied.
+ //
+ // This exception needs to be kept in sync with allowing
+ // `#[target_feature]` on `main` and `start`.
+ } else if !tcx.features().target_feature_11 {
+ let mut err = feature_err(
+ &tcx.sess.parse_sess,
+ sym::target_feature_11,
+ attr.span,
+ "`#[target_feature(..)]` can only be applied to `unsafe` functions",
+ );
+ err.span_label(tcx.def_span(did), "not an `unsafe` function");
+ err.emit();
+ } else {
+ check_target_feature_trait_unsafe(tcx, did, attr.span);
+ }
}
+ from_target_feature(
+ tcx,
+ attr,
+ supported_target_features,
+ &mut codegen_fn_attrs.target_features,
+ );
}
- } else if attr.has_name(sym::link_name) {
- codegen_fn_attrs.link_name = attr.value_str();
- } else if attr.has_name(sym::link_ordinal) {
- link_ordinal_span = Some(attr.span);
- if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) {
- codegen_fn_attrs.link_ordinal = ordinal;
+ sym::linkage => {
+ if let Some(val) = attr.value_str() {
+ let linkage = Some(linkage_by_name(tcx, did, val.as_str()));
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.import_linkage = linkage;
+ } else {
+ codegen_fn_attrs.linkage = linkage;
+ }
+ }
}
- } else if attr.has_name(sym::no_sanitize) {
- no_sanitize_span = Some(attr.span);
- if let Some(list) = attr.meta_item_list() {
- for item in list.iter() {
- if item.has_name(sym::address) {
- codegen_fn_attrs.no_sanitize |=
- SanitizerSet::ADDRESS | SanitizerSet::KERNELADDRESS;
- } else if item.has_name(sym::cfi) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI;
- } else if item.has_name(sym::kcfi) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::KCFI;
- } else if item.has_name(sym::memory) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
- } else if item.has_name(sym::memtag) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG;
- } else if item.has_name(sym::shadow_call_stack) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK;
- } else if item.has_name(sym::thread) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD;
- } else if item.has_name(sym::hwaddress) {
- codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS;
+ sym::link_section => {
+ if let Some(val) = attr.value_str() {
+ if val.as_str().bytes().any(|b| b == 0) {
+ let msg = format!("illegal null byte in link_section value: `{}`", &val);
+ tcx.sess.span_err(attr.span, &msg);
} else {
- tcx.sess
- .struct_span_err(item.span(), "invalid argument for `no_sanitize`")
- .note("expected one of: `address`, `cfi`, `hwaddress`, `kcfi`, `memory`, `memtag`, `shadow-call-stack`, or `thread`")
- .emit();
+ codegen_fn_attrs.link_section = Some(val);
}
}
}
- } else if attr.has_name(sym::instruction_set) {
- codegen_fn_attrs.instruction_set = attr.meta_item_list().and_then(|l| match &l[..] {
- [NestedMetaItem::MetaItem(set)] => {
- let segments =
- set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
- match segments.as_slice() {
- [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
- if !tcx.sess.target.has_thumb_interworking {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "target does not support `#[instruction_set]`"
- )
- .emit();
- None
- } else if segments[1] == sym::a32 {
- Some(InstructionSetAttr::ArmA32)
- } else if segments[1] == sym::t32 {
- Some(InstructionSetAttr::ArmT32)
- } else {
- unreachable!()
+ sym::link_name => codegen_fn_attrs.link_name = attr.value_str(),
+ sym::link_ordinal => {
+ link_ordinal_span = Some(attr.span);
+ if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) {
+ codegen_fn_attrs.link_ordinal = ordinal;
+ }
+ }
+ sym::no_sanitize => {
+ no_sanitize_span = Some(attr.span);
+ if let Some(list) = attr.meta_item_list() {
+ for item in list.iter() {
+ match item.name_or_empty() {
+ sym::address => {
+ codegen_fn_attrs.no_sanitize |=
+ SanitizerSet::ADDRESS | SanitizerSet::KERNELADDRESS
+ }
+ sym::cfi => codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI,
+ sym::kcfi => codegen_fn_attrs.no_sanitize |= SanitizerSet::KCFI,
+ sym::memory => codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY,
+ sym::memtag => codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG,
+ sym::shadow_call_stack => {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK
+ }
+ sym::thread => codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD,
+ sym::hwaddress => {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS
+ }
+ _ => {
+ tcx.sess
+ .struct_span_err(item.span(), "invalid argument for `no_sanitize`")
+ .note("expected one of: `address`, `cfi`, `hwaddress`, `kcfi`, `memory`, `memtag`, `shadow-call-stack`, or `thread`")
+ .emit();
}
- }
- _ => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "invalid instruction set specified",
- )
- .emit();
- None
}
}
}
- [] => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0778,
- "`#[instruction_set]` requires an argument"
- )
- .emit();
- None
- }
- _ => {
- struct_span_err!(
- tcx.sess.diagnostic(),
- attr.span,
- E0779,
- "cannot specify more than one instruction set"
- )
- .emit();
- None
- }
- })
- } else if attr.has_name(sym::repr) {
- codegen_fn_attrs.alignment = match attr.meta_item_list() {
- Some(items) => match items.as_slice() {
- [item] => match item.name_value_literal() {
- Some((sym::align, literal)) => {
- let alignment = rustc_attr::parse_alignment(&literal.kind);
-
- match alignment {
- Ok(align) => Some(align),
- Err(msg) => {
+ }
+ sym::instruction_set => {
+ codegen_fn_attrs.instruction_set =
+ attr.meta_item_list().and_then(|l| match &l[..] {
+ [NestedMetaItem::MetaItem(set)] => {
+ let segments =
+ set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
+ match segments.as_slice() {
+ [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
+ if !tcx.sess.target.has_thumb_interworking {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "target does not support `#[instruction_set]`"
+ )
+ .emit();
+ None
+ } else if segments[1] == sym::a32 {
+ Some(InstructionSetAttr::ArmA32)
+ } else if segments[1] == sym::t32 {
+ Some(InstructionSetAttr::ArmT32)
+ } else {
+ unreachable!()
+ }
+ }
+ _ => {
struct_span_err!(
tcx.sess.diagnostic(),
attr.span,
- E0589,
- "invalid `repr(align)` attribute: {}",
- msg
+ E0779,
+ "invalid instruction set specified",
)
.emit();
-
None
}
}
}
- _ => None,
- },
- [] => None,
- _ => None,
- },
- None => None,
- };
+ [] => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0778,
+ "`#[instruction_set]` requires an argument"
+ )
+ .emit();
+ None
+ }
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "cannot specify more than one instruction set"
+ )
+ .emit();
+ None
+ }
+ })
+ }
+ sym::repr => {
+ codegen_fn_attrs.alignment = if let Some(items) = attr.meta_item_list()
+ && let [item] = items.as_slice()
+ && let Some((sym::align, literal)) = item.name_value_literal()
+ {
+ rustc_attr::parse_alignment(&literal.kind).map_err(|msg| {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0589,
+ "invalid `repr(align)` attribute: {}",
+ msg
+ )
+ .emit();
+ })
+ .ok()
+ } else {
+ None
+ };
+ }
+ _ => {}
}
}
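The bulk of this file's diff is a mechanical restructure: the long `if attr.has_name(..) { .. } else if ..` chain becomes one `match` on the attribute's identifier, with unknown names falling through to `_ => {}`. The shape of that transformation, reduced to a toy example:

```rust
#[derive(Clone, Copy)]
enum Name { Cold, Naked, Other }

struct Attr { name: Name }

fn flags_for(attrs: &[Attr]) -> Vec<&'static str> {
    let mut flags = Vec::new();
    for attr in attrs {
        // Before: `if attr.has_name(cold) { .. } else if attr.has_name(naked) { .. }`
        // After: one match per attribute name.
        match attr.name {
            Name::Cold => flags.push("COLD"),
            Name::Naked => flags.push("NAKED"),
            Name::Other => {}
        }
    }
    flags
}

fn main() {
    let attrs = [Attr { name: Name::Naked }, Attr { name: Name::Other }];
    assert_eq!(flags_for(&attrs), ["NAKED"]);
}
```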
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
index 1a6495cb1..1ea130400 100644
--- a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -1,6 +1,6 @@
pub use super::ffi::*;
-use rustc_index::vec::IndexVec;
+use rustc_index::vec::{IndexSlice, IndexVec};
use rustc_middle::mir::coverage::{
CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
InjectedExpressionIndex, MappedExpressionIndex, Op,
@@ -205,7 +205,7 @@ impl<'tcx> FunctionCoverage<'tcx> {
// `expression_index`s lower than the referencing `Expression`. Therefore, it is
// reasonable to look up the new index of an expression operand while the `new_indexes`
// vector is only complete up to the current `ExpressionIndex`.
- let id_to_counter = |new_indexes: &IndexVec<
+ let id_to_counter = |new_indexes: &IndexSlice<
InjectedExpressionIndex,
Option<MappedExpressionIndex>,
>,
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 6dea7496f..66e7e314f 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -405,8 +405,8 @@ pub struct MsvcMissingLinker;
pub struct CheckInstalledVisualStudio;
#[derive(Diagnostic)]
-#[diag(codegen_ssa_unsufficient_vs_code_product)]
-pub struct UnsufficientVSCodeProduct;
+#[diag(codegen_ssa_insufficient_vs_code_product)]
+pub struct InsufficientVSCodeProduct;
#[derive(Diagnostic)]
#[diag(codegen_ssa_processing_dymutil_failed)]
diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs
index 0f6e6032f..c34f1dbf8 100644
--- a/compiler/rustc_codegen_ssa/src/glue.rs
+++ b/compiler/rustc_codegen_ssa/src/glue.rs
@@ -46,7 +46,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// NOTE: ideally, we want the effects of both `unchecked_smul` and `unchecked_umul`
// (resulting in `mul nsw nuw` in LLVM IR), since we know that the multiplication
// cannot signed wrap, and that both operands are non-negative. But at the time of writing,
- // `BuilderMethods` can't do this, and it doesn't seem to enable any further optimizations.
+ // the `LLVM-C` binding can't do this, and it doesn't seem to enable any further optimizations.
bx.unchecked_smul(info.unwrap(), bx.const_usize(unit.size.bytes())),
bx.const_usize(unit.align.abi.bytes()),
)
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index ebe9e50ff..0ab12314b 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -5,7 +5,6 @@
#![feature(int_roundings)]
#![feature(let_chains)]
#![feature(never_type)]
-#![feature(once_cell)]
#![feature(strict_provenance)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
@@ -56,7 +55,7 @@ pub mod mono_item;
pub mod target_features;
pub mod traits;
-fluent_messages! { "../locales/en-US.ftl" }
+fluent_messages! { "../messages.ftl" }
pub struct ModuleCodegen<M> {
/// The name of the module. When the crate may be saved between
@@ -118,7 +117,7 @@ bitflags::bitflags! {
#[derive(Clone, Debug, Encodable, Decodable, HashStable)]
pub struct NativeLib {
pub kind: NativeLibKind,
- pub name: Option<Symbol>,
+ pub name: Symbol,
pub filename: Option<Symbol>,
pub cfg: Option<ast::MetaItem>,
pub verbatim: bool,
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 95aad10fd..f43f1d64a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -5,7 +5,7 @@ use super::FunctionCx;
use crate::traits::*;
use rustc_data_structures::graph::dominators::Dominators;
use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
+use rustc_index::vec::{IndexSlice, IndexVec};
use rustc_middle::mir::traversal;
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{self, Location, TerminatorKind};
@@ -232,7 +232,6 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
| PlaceContext::NonMutatingUse(
NonMutatingUseContext::Inspect
| NonMutatingUseContext::SharedBorrow
- | NonMutatingUseContext::UniqueBorrow
| NonMutatingUseContext::ShallowBorrow
| NonMutatingUseContext::AddressOf
| NonMutatingUseContext::Projection,
@@ -277,14 +276,14 @@ impl CleanupKind {
/// Recover that structure in an analyze pass.
pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
fn discover_masters<'tcx>(
- result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+ result: &mut IndexSlice<mir::BasicBlock, CleanupKind>,
mir: &mir::Body<'tcx>,
) {
for (bb, data) in mir.basic_blocks.iter_enumerated() {
match data.terminator().kind {
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
- | TerminatorKind::Abort
+ | TerminatorKind::Terminate
| TerminatorKind::Return
| TerminatorKind::GeneratorDrop
| TerminatorKind::Unreachable
@@ -292,12 +291,11 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
| TerminatorKind::Yield { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. } => { /* nothing to do */ }
- TerminatorKind::Call { cleanup: unwind, .. }
- | TerminatorKind::InlineAsm { cleanup: unwind, .. }
- | TerminatorKind::Assert { cleanup: unwind, .. }
- | TerminatorKind::DropAndReplace { unwind, .. }
+ TerminatorKind::Call { unwind, .. }
+ | TerminatorKind::InlineAsm { unwind, .. }
+ | TerminatorKind::Assert { unwind, .. }
| TerminatorKind::Drop { unwind, .. } => {
- if let Some(unwind) = unwind {
+ if let mir::UnwindAction::Cleanup(unwind) = unwind {
debug!(
"cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
bb, data, unwind
@@ -309,7 +307,10 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
}
}
- fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+ fn propagate<'tcx>(
+ result: &mut IndexSlice<mir::BasicBlock, CleanupKind>,
+ mir: &mir::Body<'tcx>,
+ ) {
let mut funclet_succs = IndexVec::from_elem(None, &mir.basic_blocks);
let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
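Here and in `block.rs` below, the `Option<BasicBlock>`-as-cleanup encoding is replaced by the richer `mir::UnwindAction`, which call sites now match on explicitly. A stand-in enum showing why the match is clearer than the old `Option` (variant names from the diff, everything else simplified):

```rust
#[derive(Clone, Copy)]
enum UnwindAction {
    Continue,        // propagate to the caller (old `None`)
    Unreachable,     // unwinding cannot happen here
    Terminate,       // abort the process
    Cleanup(usize),  // run this cleanup block (old `Some(bb)`)
}

fn unwind_block(unwind: UnwindAction) -> Option<usize> {
    match unwind {
        UnwindAction::Cleanup(bb) => Some(bb),
        UnwindAction::Continue | UnwindAction::Unreachable => None,
        UnwindAction::Terminate => Some(usize::MAX), // placeholder "terminate" block
    }
}

fn main() {
    assert_eq!(unwind_block(UnwindAction::Cleanup(3)), Some(3));
    assert!(unwind_block(UnwindAction::Continue).is_none());
}
```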
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 57a19a4ab..dd8697781 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -16,7 +16,7 @@ use rustc_index::vec::Idx;
use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Instance, Ty, TypeVisitableExt};
+use rustc_middle::ty::{self, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
@@ -147,7 +147,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
/// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
- /// return destination `destination` and the cleanup function `cleanup`.
+ /// return destination `destination` and the unwind action `unwind`.
fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
@@ -156,7 +156,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fn_ptr: Bx::Value,
llargs: &[Bx::Value],
destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
- cleanup: Option<mir::BasicBlock>,
+ mut unwind: mir::UnwindAction,
copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
mergeable_succ: bool,
) -> MergingSucc {
@@ -164,23 +164,23 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
// do an invoke, otherwise do a call.
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
- Some(self.llbb_with_cleanup(fx, cleanup))
- } else if fx.mir[self.bb].is_cleanup
- && fn_abi.can_unwind
- && !base::wants_msvc_seh(fx.cx.tcx().sess)
- {
- // Exception must not propagate out of the execution of a cleanup (doing so
- // can cause undefined behaviour). We insert a double unwind guard for
- // functions that can potentially unwind to protect against this.
- //
- // This is not necessary for SEH which does not use successive unwinding
- // like Itanium EH. EH frames in SEH are different from normal function
- // frames and SEH will abort automatically if an exception tries to
- // propagate out from cleanup.
- Some(fx.double_unwind_guard())
- } else {
- None
+ if !fn_abi.can_unwind {
+ unwind = mir::UnwindAction::Unreachable;
+ }
+
+ let unwind_block = match unwind {
+ mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
+ mir::UnwindAction::Continue => None,
+ mir::UnwindAction::Unreachable => None,
+ mir::UnwindAction::Terminate => {
+ if fx.mir[self.bb].is_cleanup && base::wants_msvc_seh(fx.cx.tcx().sess) {
+ // SEH will abort automatically if an exception tries to
+ // propagate out from cleanup.
+ None
+ } else {
+ Some(fx.terminate_block())
+ }
+ }
};
if let Some(unwind_block) = unwind_block {
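
The rewritten `do_call` prologue reads well as a two-step rule: a callee whose ABI cannot unwind downgrades any requested action to `Unreachable`, and only `Cleanup` and (outside SEH cleanups) `Terminate` produce an unwind target. A hedged sketch of just that decision, with plain stand-in types:

    #[derive(Clone, Copy)]
    enum UnwindAction { Continue, Unreachable, Terminate, Cleanup(u32) }

    fn unwind_target(
        mut unwind: UnwindAction,
        callee_can_unwind: bool,
        in_seh_cleanup: bool,
    ) -> Option<String> {
        if !callee_can_unwind {
            unwind = UnwindAction::Unreachable; // no pad needed at all
        }
        match unwind {
            UnwindAction::Cleanup(bb) => Some(format!("landing pad of bb{bb}")),
            UnwindAction::Continue | UnwindAction::Unreachable => None,
            // SEH aborts on its own if an exception escapes a cleanup pad.
            UnwindAction::Terminate if in_seh_cleanup => None,
            UnwindAction::Terminate => Some("shared terminate block".into()),
        }
    }

    fn main() {
        // A cleanup edge on a nounwind callee needs no pad at all.
        assert!(unwind_target(UnwindAction::Cleanup(2), false, false).is_none());
        assert_eq!(
            unwind_target(UnwindAction::Terminate, true, false),
            Some("shared terminate block".into())
        );
    }
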
@@ -234,7 +234,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
- /// Generates inline assembly with optional `destination` and `cleanup`.
+ /// Generates inline assembly with optional `destination` and `unwind`.
fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
@@ -244,11 +244,18 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
options: InlineAsmOptions,
line_spans: &[Span],
destination: Option<mir::BasicBlock>,
- cleanup: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
instance: Instance<'_>,
mergeable_succ: bool,
) -> MergingSucc {
- if let Some(cleanup) = cleanup {
+ let unwind_target = match unwind {
+ mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
+ mir::UnwindAction::Terminate => Some(fx.terminate_block()),
+ mir::UnwindAction::Continue => None,
+ mir::UnwindAction::Unreachable => None,
+ };
+
+ if let Some(cleanup) = unwind_target {
let ret_llbb = if let Some(target) = destination {
fx.llbb(target)
} else {
@@ -261,7 +268,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
options,
line_spans,
instance,
- Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
+ Some((ret_llbb, cleanup, self.funclet(fx))),
);
MergingSucc::False
} else {
@@ -397,8 +404,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
PassMode::Cast(cast_ty, _) => {
let op = match self.locals[mir::RETURN_PLACE] {
- LocalRef::Operand(Some(op)) => op,
- LocalRef::Operand(None) => bug!("use of return before def"),
+ LocalRef::Operand(op) => op,
+ LocalRef::PendingOperand => bug!("use of return before def"),
LocalRef::Place(cg_place) => OperandRef {
val: Ref(cg_place.llval, None, cg_place.align),
layout: cg_place.layout,
@@ -431,7 +438,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx: &mut Bx,
location: mir::Place<'tcx>,
target: mir::BasicBlock,
- unwind: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
mergeable_succ: bool,
) -> MergingSucc {
let ty = location.ty(self.mir, bx.tcx()).ty;
@@ -552,7 +559,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
expected: bool,
msg: &mir::AssertMessage<'tcx>,
target: mir::BasicBlock,
- cleanup: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
mergeable_succ: bool,
) -> MergingSucc {
let span = terminator.source_info.span;
@@ -563,15 +570,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
// while the current crate doesn't use overflow checks.
- if !bx.cx().check_overflow() {
- let overflow_not_to_check = match msg {
- AssertKind::OverflowNeg(..) => true,
- AssertKind::Overflow(op, ..) => op.is_checkable(),
- _ => false,
- };
- if overflow_not_to_check {
- const_cond = Some(expected);
- }
+ if !bx.cx().check_overflow() && msg.is_optional_overflow_check() {
+ const_cond = Some(expected);
}
        // Don't codegen the panic block if success is known.
@@ -607,6 +607,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// and `#[track_caller]` adds an implicit third argument.
(LangItem::PanicBoundsCheck, vec![index, len, location])
}
+ AssertKind::MisalignedPointerDereference { ref required, ref found } => {
+ let required = self.codegen_operand(bx, required).immediate();
+ let found = self.codegen_operand(bx, found).immediate();
+                // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
+                // and `#[track_caller]` adds an implicit third argument.
+ (LangItem::PanicMisalignedPointerDereference, vec![required, found, location])
+ }
_ => {
let msg = bx.const_str(msg.description());
// It's `pub fn panic(expr: &str)`, with the wide reference being passed
@@ -618,12 +625,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);
// Codegen the actual panic invoke/call.
- let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[], false);
+ let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, unwind, &[], false);
assert_eq!(merging_succ, MergingSucc::False);
MergingSucc::False
}
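
The new `MisalignedPointerDereference` arm lowers the alignment assertions that a MIR pass now inserts ahead of raw-pointer dereferences; `required` is the type's alignment and `found` the actual address. A source-level equivalent of the check being lowered (hand-written here for illustration; the real one is generated in MIR):

    fn read_checked(ptr: *const u64) -> u64 {
        let required = std::mem::align_of::<u64>();
        let found = ptr as usize;
        // On failure the generated assert calls the
        // PanicMisalignedPointerDereference lang item with these two
        // values, plus the implicit #[track_caller] location.
        if found % required != 0 {
            panic!("misaligned pointer dereference: address must be a multiple of {required} but is {found:#x}");
        }
        unsafe { *ptr }
    }

    fn main() {
        let x = 7u64;
        assert_eq!(read_checked(&x), 7); // aligned, so no panic
    }
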
- fn codegen_abort_terminator(
+ fn codegen_terminate_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
bx: &mut Bx,
@@ -636,7 +643,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind);
// Codegen the actual panic invoke/call.
- let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[], false);
+ let merging_succ = helper.do_call(
+ self,
+ bx,
+ fn_abi,
+ llfn,
+ &[],
+ None,
+ mir::UnwindAction::Unreachable,
+ &[],
+ false,
+ );
assert_eq!(merging_succ, MergingSucc::False);
}
@@ -649,7 +666,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
instance: Option<Instance<'tcx>>,
source_info: mir::SourceInfo,
target: Option<mir::BasicBlock>,
- cleanup: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
mergeable_succ: bool,
) -> Option<MergingSucc> {
// Emit a panic or a no-op for `assert_*` intrinsics.
@@ -696,7 +713,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llfn,
&[msg.0, msg.1],
target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
- cleanup,
+ unwind,
&[],
mergeable_succ,
)
@@ -719,7 +736,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args: &[mir::Operand<'tcx>],
destination: mir::Place<'tcx>,
target: Option<mir::BasicBlock>,
- cleanup: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
fn_span: Span,
mergeable_succ: bool,
) -> MergingSucc {
@@ -776,23 +793,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
None => bx.fn_abi_of_fn_ptr(sig, extra_args),
};
- if intrinsic == Some(sym::transmute) {
- return if let Some(target) = target {
- self.codegen_transmute(bx, &args[0], destination);
- helper.funclet_br(self, bx, target, mergeable_succ)
- } else {
- // If we are trying to transmute to an uninhabited type,
- // it is likely there is no allotted destination. In fact,
- // transmuting to an uninhabited type is UB, which means
- // we can do what we like. Here, we declare that transmuting
- // into an uninhabited type is impossible, so anything following
- // it must be unreachable.
- assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
- bx.unreachable();
- MergingSucc::False
- };
- }
-
if let Some(merging_succ) = self.codegen_panic_intrinsic(
&helper,
bx,
@@ -800,7 +800,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
instance,
source_info,
target,
- cleanup,
+ unwind,
mergeable_succ,
) {
return merging_succ;
@@ -835,7 +835,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match intrinsic {
None | Some(sym::drop_in_place) => {}
- Some(sym::copy_nonoverlapping) => unreachable!(),
Some(intrinsic) => {
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
@@ -924,7 +923,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
//
// This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
+ && !op.layout.ty.is_ref()
{
for i in 0..op.layout.fields.count() {
let field = op.extract_field(bx, i);
@@ -966,7 +965,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Immediate(_) => {
// See comment above explaining why we peel these newtypes
'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
+ && !op.layout.ty.is_ref()
{
for i in 0..op.layout.fields.count() {
let field = op.extract_field(bx, i);
@@ -1082,7 +1081,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
- cleanup,
+ unwind,
&copied_constant_arguments,
false,
);
@@ -1102,7 +1101,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
- cleanup,
+ unwind,
&copied_constant_arguments,
mergeable_succ,
)
@@ -1118,7 +1117,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
options: ast::InlineAsmOptions,
line_spans: &[Span],
destination: Option<mir::BasicBlock>,
- cleanup: Option<mir::BasicBlock>,
+ unwind: mir::UnwindAction,
instance: Instance<'_>,
mergeable_succ: bool,
) -> MergingSucc {
@@ -1182,7 +1181,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
options,
line_spans,
destination,
- cleanup,
+ unwind,
instance,
mergeable_succ,
)
@@ -1264,8 +1263,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
MergingSucc::False
}
- mir::TerminatorKind::Abort => {
- self.codegen_abort_terminator(helper, bx, terminator);
+ mir::TerminatorKind::Terminate => {
+ self.codegen_terminate_terminator(helper, bx, terminator);
MergingSucc::False
}
@@ -1292,7 +1291,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
}
- mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, unwind } => self
.codegen_assert_terminator(
helper,
bx,
@@ -1301,20 +1300,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
expected,
msg,
target,
- cleanup,
+ unwind,
mergeable_succ(),
),
- mir::TerminatorKind::DropAndReplace { .. } => {
- bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
- }
-
mir::TerminatorKind::Call {
ref func,
ref args,
destination,
target,
- cleanup,
+ unwind,
from_hir_call: _,
fn_span,
} => self.codegen_call_terminator(
@@ -1325,7 +1320,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args,
destination,
target,
- cleanup,
+ unwind,
fn_span,
mergeable_succ(),
),
@@ -1342,7 +1337,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
options,
line_spans,
destination,
- cleanup,
+ unwind,
} => self.codegen_asm_terminator(
helper,
bx,
@@ -1352,7 +1347,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
options,
line_spans,
destination,
- cleanup,
+ unwind,
self.instance,
mergeable_succ(),
),
@@ -1486,7 +1481,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> OperandRef<'tcx, Bx::Value> {
let tcx = bx.tcx();
- let mut span_to_caller_location = |span: Span| {
+ let mut span_to_caller_location = |mut span: Span| {
+ // Remove `Inlined` marks as they pollute `expansion_cause`.
+ while span.is_inlined() {
+ span.remove_mark();
+ }
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
let const_loc = tcx.const_caller_location((
@@ -1554,62 +1553,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
let llbb = self.llbb(bb);
if base::wants_msvc_seh(self.cx.sess()) {
- let funclet;
- let ret_llbb;
- match self.mir[bb].terminator.as_ref().map(|t| &t.kind) {
- // This is a basic block that we're aborting the program for,
- // notably in an `extern` function. These basic blocks are inserted
- // so that we assert that `extern` functions do indeed not panic,
- // and if they do we abort the process.
- //
- // On MSVC these are tricky though (where we're doing funclets). If
- // we were to do a cleanuppad (like below) the normal functions like
- // `longjmp` would trigger the abort logic, terminating the
- // program. Instead we insert the equivalent of `catch(...)` for C++
- // which magically doesn't trigger when `longjmp` files over this
- // frame.
- //
- // Lots more discussion can be found on #48251 but this codegen is
- // modeled after clang's for:
- //
- // try {
- // foo();
- // } catch (...) {
- // bar();
- // }
- Some(&mir::TerminatorKind::Abort) => {
- let cs_llbb =
- Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
- let cp_llbb =
- Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
- ret_llbb = cs_llbb;
-
- let mut cs_bx = Bx::build(self.cx, cs_llbb);
- let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
-
- // The "null" here is actually a RTTI type descriptor for the
- // C++ personality function, but `catch (...)` has no type so
- // it's null. The 64 here is actually a bitfield which
- // represents that this is a catch-all block.
- let mut cp_bx = Bx::build(self.cx, cp_llbb);
- let null = cp_bx.const_null(
- cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
- );
- let sixty_four = cp_bx.const_i32(64);
- funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
- cp_bx.br(llbb);
- }
- _ => {
- let cleanup_llbb =
- Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
- ret_llbb = cleanup_llbb;
- let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
- funclet = cleanup_bx.cleanup_pad(None, &[]);
- cleanup_bx.br(llbb);
- }
- }
+ let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ let funclet = cleanup_bx.cleanup_pad(None, &[]);
+ cleanup_bx.br(llbb);
self.funclets[bb] = Some(funclet);
- ret_llbb
+ cleanup_bb
} else {
let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
@@ -1636,26 +1585,68 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
})
}
- fn double_unwind_guard(&mut self) -> Bx::BasicBlock {
- self.double_unwind_guard.unwrap_or_else(|| {
- assert!(!base::wants_msvc_seh(self.cx.sess()));
+ fn terminate_block(&mut self) -> Bx::BasicBlock {
+ self.terminate_block.unwrap_or_else(|| {
+ let funclet;
+ let llbb;
+ let mut bx;
+ if base::wants_msvc_seh(self.cx.sess()) {
+ // This is a basic block that we're aborting the program for,
+ // notably in an `extern` function. These basic blocks are inserted
+ // so that we assert that `extern` functions do indeed not panic,
+ // and if they do we abort the process.
+ //
+ // On MSVC these are tricky though (where we're doing funclets). If
+ // we were to do a cleanuppad (like below) the normal functions like
+ // `longjmp` would trigger the abort logic, terminating the
+ // program. Instead we insert the equivalent of `catch(...)` for C++
+ // which magically doesn't trigger when `longjmp` files over this
+ // frame.
+ //
+ // Lots more discussion can be found on #48251 but this codegen is
+ // modeled after clang's for:
+ //
+ // try {
+ // foo();
+ // } catch (...) {
+ // bar();
+ // }
+ llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
+ let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
+
+ let mut cs_bx = Bx::build(self.cx, llbb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
+
+ // The "null" here is actually a RTTI type descriptor for the
+ // C++ personality function, but `catch (...)` has no type so
+ // it's null. The 64 here is actually a bitfield which
+ // represents that this is a catch-all block.
+ bx = Bx::build(self.cx, cp_llbb);
+ let null =
+ bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+ let sixty_four = bx.const_i32(64);
+ funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
+ } else {
+ llbb = Bx::append_block(self.cx, self.llfn, "terminate");
+ bx = Bx::build(self.cx, llbb);
- let llbb = Bx::append_block(self.cx, self.llfn, "abort");
- let mut bx = Bx::build(self.cx, llbb);
- self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+ let llpersonality = self.cx.eh_personality();
+ bx.cleanup_landing_pad(llpersonality);
- let llpersonality = self.cx.eh_personality();
- bx.cleanup_landing_pad(llpersonality);
+ funclet = None;
+ }
+
+ self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
bx.do_not_inline(llret);
bx.unreachable();
- self.double_unwind_guard = Some(llbb);
+ self.terminate_block = Some(llbb);
llbb
})
}
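
`terminate_block` keeps the build-once-then-cache pattern that `double_unwind_guard` had, since every call site that needs to abort on unwind can share one block. The pattern in miniature (stand-in types, no builder):

    struct FunctionCx {
        terminate_block: Option<u32>,
        next_block: u32,
    }

    impl FunctionCx {
        // Build the block on the first request, then hand back the cached
        // copy; `next_block` stands in for append_block plus the panic call.
        fn terminate_block(&mut self) -> u32 {
            if let Some(bb) = self.terminate_block {
                return bb;
            }
            let bb = self.next_block;
            self.next_block += 1;
            self.terminate_block = Some(bb);
            bb
        }
    }

    fn main() {
        let mut fx = FunctionCx { terminate_block: None, next_block: 7 };
        assert_eq!(fx.terminate_block(), 7);
        assert_eq!(fx.terminate_block(), 7); // cached on every later request
    }
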
@@ -1698,7 +1689,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match self.locals[index] {
LocalRef::Place(dest) => dest,
LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
- LocalRef::Operand(None) => {
+ LocalRef::PendingOperand => {
// Handle temporary places, specifically `Operand` ones, as
// they don't have `alloca`s.
return if fn_ret.is_indirect() {
@@ -1719,7 +1710,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ReturnDest::DirectOperand(index)
};
}
- LocalRef::Operand(Some(_)) => {
+ LocalRef::Operand(_) => {
bug!("place local already assigned to");
}
}
@@ -1746,71 +1737,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
- if let Some(index) = dst.as_local() {
- match self.locals[index] {
- LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
- LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
- LocalRef::Operand(None) => {
- let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
- assert!(!dst_layout.ty.has_erasable_regions());
- let place = PlaceRef::alloca(bx, dst_layout);
- place.storage_live(bx);
- self.codegen_transmute_into(bx, src, place);
- let op = bx.load_operand(place);
- place.storage_dead(bx);
- self.locals[index] = LocalRef::Operand(Some(op));
- self.debug_introduce_local(bx, index);
- }
- LocalRef::Operand(Some(op)) => {
- assert!(op.layout.is_zst(), "assigning to initialized SSAtemp");
- }
- }
- } else {
- let dst = self.codegen_place(bx, dst.as_ref());
- self.codegen_transmute_into(bx, src, dst);
- }
- }
-
- fn codegen_transmute_into(
- &mut self,
- bx: &mut Bx,
- src: &mir::Operand<'tcx>,
- dst: PlaceRef<'tcx, Bx::Value>,
- ) {
- let src = self.codegen_operand(bx, src);
-
- // Special-case transmutes between scalars as simple bitcasts.
- match (src.layout.abi, dst.layout.abi) {
- (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
- // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
- let src_is_ptr = matches!(src_scalar.primitive(), abi::Pointer(_));
- let dst_is_ptr = matches!(dst_scalar.primitive(), abi::Pointer(_));
- if src_is_ptr == dst_is_ptr {
- assert_eq!(src.layout.size, dst.layout.size);
-
- // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
- // conversions allow handling `bool`s the same as `u8`s.
- let src = bx.from_immediate(src.immediate());
- // LLVM also doesn't like `bitcast`s between pointers in different address spaces.
- let src_as_dst = if src_is_ptr {
- bx.pointercast(src, bx.backend_type(dst.layout))
- } else {
- bx.bitcast(src, bx.backend_type(dst.layout))
- };
- Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
- return;
- }
- }
- _ => {}
- }
-
- let llty = bx.backend_type(src.layout);
- let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
- let align = src.layout.align.abi.min(dst.align);
- src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
- }
-
    // Stores the return value of a function call into its final location.
fn store_return(
&mut self,
@@ -1827,7 +1753,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
IndirectOperand(tmp, index) => {
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
- self.locals[index] = LocalRef::Operand(Some(op));
+ self.locals[index] = LocalRef::Operand(op);
self.debug_introduce_local(bx, index);
}
DirectOperand(index) => {
@@ -1842,7 +1768,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
};
- self.locals[index] = LocalRef::Operand(Some(op));
+ self.locals[index] = LocalRef::Operand(op);
self.debug_introduce_local(bx, index);
}
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 708f3bc0c..d049bafb8 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{kw, Symbol};
use rustc_span::{BytePos, Span};
-use rustc_target::abi::{Abi, Size, VariantIdx};
+use rustc_target::abi::{Abi, FieldIdx, Size, VariantIdx};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
@@ -79,7 +79,7 @@ impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
trait DebugInfoOffsetLocation<'tcx, Bx> {
fn deref(&self, bx: &mut Bx) -> Self;
fn layout(&self) -> TyAndLayout<'tcx>;
- fn project_field(&self, bx: &mut Bx, field: mir::Field) -> Self;
+ fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self;
fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self;
}
@@ -94,7 +94,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
self.layout
}
- fn project_field(&self, bx: &mut Bx, field: mir::Field) -> Self {
+ fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self {
PlaceRef::project_field(*self, bx, field.index())
}
@@ -116,7 +116,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
*self
}
- fn project_field(&self, bx: &mut Bx, field: mir::Field) -> Self {
+ fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self {
self.field(bx.cx(), field.index())
}
@@ -165,11 +165,15 @@ fn calculate_debuginfo_offset<
mir::ProjectionElem::Downcast(_, variant) => {
place = place.downcast(bx, variant);
}
- _ => span_bug!(
- var.source_info.span,
- "unsupported var debuginfo place `{:?}`",
- mir::Place { local, projection: var.projection },
- ),
+ _ => {
+ // Sanity check for `can_use_in_debuginfo`.
+ debug_assert!(!elem.can_use_in_debuginfo());
+ span_bug!(
+ var.source_info.span,
+ "unsupported var debuginfo place `{:?}`",
+ mir::Place { local, projection: var.projection },
+ )
+ }
}
}
@@ -241,12 +245,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
- // FIXME(eddyb) maybe name the return place as `_0` or `return`?
- if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
- {
- return;
- }
-
let vars = match &self.per_local_var_debug_info {
Some(per_local) => &per_local[local],
None => return,
@@ -303,7 +301,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let local_ref = &self.locals[local];
- let name = if bx.sess().fewer_names() {
+ // FIXME Should the return place be named?
+ let name = if bx.sess().fewer_names() || local == mir::RETURN_PLACE {
None
} else {
Some(match whole_local_var.or(fallback_var.clone()) {
@@ -317,7 +316,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
bx.set_var_name(place.llval, name);
}
- LocalRef::Operand(Some(operand)) => match operand.val {
+ LocalRef::Operand(operand) => match operand.val {
OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
bx.set_var_name(x, name);
}
@@ -328,7 +327,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.set_var_name(b, &(name.clone() + ".1"));
}
},
- LocalRef::Operand(None) => {}
+ LocalRef::PendingOperand => {}
}
}
@@ -337,9 +336,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
let base = match local_ref {
- LocalRef::Operand(None) => return,
+ LocalRef::PendingOperand => return,
- LocalRef::Operand(Some(operand)) => {
+ LocalRef::Operand(operand) => {
// Don't spill operands onto the stack in naked functions.
// See: https://github.com/rust-lang/rust/issues/42779
let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
@@ -443,11 +442,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (var_ty, var_kind) = match var.value {
mir::VarDebugInfoContents::Place(place) => {
let var_ty = self.monomorphized_place_ty(place.as_ref());
- let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+ let var_kind = if let Some(arg_index) = var.argument_index
&& place.projection.is_empty()
- && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
{
- let arg_index = place.local.index() - 1;
+ let arg_index = arg_index as usize;
if target_is_msvc {
// ScalarPair parameters are spilled to the stack so they need to
// be marked as a `LocalVariable` for MSVC debuggers to visualize
@@ -456,13 +454,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
VariableKind::LocalVariable
} else {
- VariableKind::ArgumentVariable(arg_index + 1)
+ VariableKind::ArgumentVariable(arg_index)
}
} else {
// FIXME(eddyb) shouldn't `ArgumentVariable` indices be
// offset in closures to account for the hidden environment?
- // Also, is this `+ 1` needed at all?
- VariableKind::ArgumentVariable(arg_index + 1)
+ VariableKind::ArgumentVariable(arg_index)
}
} else {
VariableKind::LocalVariable
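
The debuginfo hunk above stops deriving the argument number from the local's position and instead trusts the index recorded on the variable itself. A small model of the new rule (assuming, as the diff suggests, that `argument_index` is already 1-based when present):

    // Illustrative stand-in for mir::VarDebugInfo.
    struct VarDebugInfo { argument_index: Option<u16> }

    fn variable_kind(var: &VarDebugInfo, has_projection: bool) -> String {
        match var.argument_index {
            Some(i) if !has_projection => format!("ArgumentVariable({i})"),
            _ => "LocalVariable".to_string(),
        }
    }

    fn main() {
        let v = VarDebugInfo { argument_index: Some(1) };
        assert_eq!(variable_kind(&v, false), "ArgumentVariable(1)");
    }
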
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 2ec9fdbf4..3dadb33c9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -73,8 +73,8 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
/// Cached unreachable block
unreachable_block: Option<Bx::BasicBlock>,
- /// Cached double unwind guarding block
- double_unwind_guard: Option<Bx::BasicBlock>,
+    /// Cached block used for terminating upon unwinding
+ terminate_block: Option<Bx::BasicBlock>,
/// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
@@ -123,7 +123,10 @@ enum LocalRef<'tcx, V> {
/// Every time it is initialized, we have to reallocate the place
/// and update the fat pointer. That's the reason why it is indirect.
UnsizedPlace(PlaceRef<'tcx, V>),
- Operand(Option<OperandRef<'tcx, V>>),
+ /// The backend [`OperandValue`] has already been generated.
+ Operand(OperandRef<'tcx, V>),
+ /// Will be a `Self::Operand` once we get to its definition.
+ PendingOperand,
}
impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
@@ -135,9 +138,9 @@ impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
// we need something in the operand.
- LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+ LocalRef::Operand(OperandRef::new_zst(bx, layout))
} else {
- LocalRef::Operand(None)
+ LocalRef::PendingOperand
}
}
}
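
Splitting `Operand(Option<..>)` into `Operand(..)` and `PendingOperand` turns the implicit two-state `Option` into an explicit state machine: a pending SSA local receives exactly one definition. A compact model of that invariant:

    #[derive(Debug)]
    enum LocalRef<V> {
        PendingOperand, // SSA local not yet defined
        Operand(V),     // SSA local holding its backend value
    }

    fn define<V: std::fmt::Debug>(slot: &mut LocalRef<V>, value: V) {
        match slot {
            // First (and only) definition: PendingOperand -> Operand.
            LocalRef::PendingOperand => *slot = LocalRef::Operand(value),
            // A second definition is a codegen bug, matching the
            // bug!("place local already assigned to") arms in block.rs.
            LocalRef::Operand(old) => panic!("already assigned: {old:?}"),
        }
    }

    fn main() {
        let mut slot: LocalRef<i32> = LocalRef::PendingOperand;
        define(&mut slot, 42);
        println!("{slot:?}"); // Operand(42)
    }
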
@@ -163,7 +166,9 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let start_llbb = Bx::append_block(cx, llfn, "start");
let mut start_bx = Bx::build(cx, start_llbb);
- if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) {
+ if mir.basic_blocks.iter().any(|bb| {
+ bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate))
+ }) {
start_bx.set_personality_fn(cx.eh_personality());
}
@@ -186,7 +191,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
personality_slot: None,
cached_llbbs,
unreachable_block: None,
- double_unwind_guard: None,
+ terminate_block: None,
cleanup_kinds,
landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
@@ -258,6 +263,10 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Apply debuginfo to the newly allocated locals.
fx.debug_introduce_locals(&mut start_bx);
+    // A separate builder is created for each basic block in `codegen_block`,
+    // so drop the `start_llbb` builder here to avoid having two live at once.
+ drop(start_bx);
+
// Codegen the body of each block using reverse postorder
for (bb, _) in traversal::reverse_postorder(&mir) {
fx.codegen_block(bb);
@@ -333,7 +342,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// We don't have to cast or keep the argument in the alloca.
// FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
// of putting everything in allocas just so we can use llvm.dbg.declare.
- let local = |op| LocalRef::Operand(Some(op));
+ let local = |op| LocalRef::Operand(op);
match arg.mode {
PassMode::Ignore => {
return local(OperandRef::new_zst(bx, arg.layout));
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 34a5b638d..b37797fef 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -23,10 +23,26 @@ pub enum OperandValue<V> {
/// to be valid for the operand's lifetime.
/// The second value, if any, is the extra data (vtable or length)
/// which indicates that it refers to an unsized rvalue.
+ ///
+ /// An `OperandValue` has this variant for types which are neither
+ /// `Immediate` nor `Pair`s. The backend value in this variant must be a
+ /// pointer to the *non*-immediate backend type. That pointee type is the
+ /// one returned by [`LayoutTypeMethods::backend_type`].
Ref(V, Option<V>, Align),
- /// A single LLVM value.
+ /// A single LLVM immediate value.
+ ///
+ /// An `OperandValue` *must* be this variant for any type for which
+ /// [`LayoutTypeMethods::is_backend_immediate`] returns `true`.
+ /// The backend value in this variant must be the *immediate* backend type,
+ /// as returned by [`LayoutTypeMethods::immediate_backend_type`].
Immediate(V),
/// A pair of immediate LLVM values. Used by fat pointers too.
+ ///
+ /// An `OperandValue` *must* be this variant for any type for which
+ /// [`LayoutTypeMethods::is_backend_scalar_pair`] returns `true`.
+ /// The backend values in this variant must be the *immediate* backend types,
+ /// as returned by [`LayoutTypeMethods::scalar_pair_element_backend_type`]
+ /// with `immediate: true`.
Pair(V, V),
}
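
The new doc comments make the layout/variant invariant explicit: the backend layout queries, not the producer's convenience, decide which `OperandValue` variant a type uses. Roughly, for familiar Rust types (illustrative only, not the real queries):

    // Which variant typical types end up with under the invariants above.
    fn expected_variant(ty: &str) -> &'static str {
        match ty {
            "u64" | "f32" | "*const u8" | "bool" => "Immediate", // one scalar
            "&[u8]" | "&dyn std::fmt::Debug" => "Pair",          // ptr + metadata
            "[u8; 64]" | "(u8, u16, u32, u64)" => "Ref",         // kept in memory
            _ => "ask is_backend_immediate / is_backend_scalar_pair",
        }
    }

    fn main() {
        assert_eq!(expected_variant("&[u8]"), "Pair");
    }
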
@@ -60,7 +76,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> OperandRef<'tcx, V> {
assert!(layout.is_zst());
OperandRef {
- val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+ val: OperandValue::Immediate(bx.const_poison(bx.immediate_backend_type(layout))),
layout,
}
}
@@ -145,7 +161,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let llty = bx.cx().backend_type(self.layout);
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
// Reconstruct the immediate aggregate.
- let mut llpair = bx.cx().const_undef(llty);
+ let mut llpair = bx.cx().const_poison(llty);
let imm_a = bx.from_immediate(a);
let imm_b = bx.from_immediate(b);
llpair = bx.insert_value(llpair, imm_a, 0);
@@ -243,6 +259,31 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
}
impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+ /// Returns an `OperandValue` that's generally UB to use in any way.
+ ///
+ /// Depending on the `layout`, returns an `Immediate` or `Pair` containing
+ /// poison value(s), or a `Ref` containing a poison pointer.
+ ///
+ /// Supports sized types only.
+ pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> OperandValue<V> {
+ assert!(layout.is_sized());
+ if bx.cx().is_backend_immediate(layout) {
+ let ibty = bx.cx().immediate_backend_type(layout);
+ OperandValue::Immediate(bx.const_poison(ibty))
+ } else if bx.cx().is_backend_scalar_pair(layout) {
+ let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
+ let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
+ OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
+ } else {
+ let bty = bx.cx().backend_type(layout);
+ let ptr_bty = bx.cx().type_ptr_to(bty);
+ OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+ }
+ }
+
pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &mut Bx,
@@ -370,7 +411,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
match self.locals[place_ref.local] {
- LocalRef::Operand(Some(mut o)) => {
+ LocalRef::Operand(mut o) => {
// Moves out of scalar and scalar pair fields are trivial.
for elem in place_ref.projection.iter() {
match elem {
@@ -395,7 +436,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(o)
}
- LocalRef::Operand(None) => {
+ LocalRef::PendingOperand => {
bug!("use of {:?} before def", place_ref);
}
LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index cf02f59f6..a58a61cd5 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -211,10 +211,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> V {
let dl = &bx.tcx().data_layout;
let cast_to_layout = bx.cx().layout_of(cast_to);
- let cast_to_size = cast_to_layout.layout.size();
let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
if self.layout.abi.is_uninhabited() {
- return bx.cx().const_undef(cast_to);
+ return bx.cx().const_poison(cast_to);
}
let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
Variants::Single { index } => {
@@ -261,21 +260,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
_ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
};
- let tag_size = tag_scalar.size(bx.cx());
- let max_unsigned = tag_size.unsigned_int_max();
- let max_signed = tag_size.signed_int_max() as u128;
- let min_signed = max_signed + 1;
let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
- let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
- let range = tag_scalar.valid_range(bx.cx());
-
- let sle = |lhs: u128, rhs: u128| -> bool {
- // Signed and unsigned comparisons give the same results,
- // except that in signed comparisons an integer with the
- // sign bit set is less than one with the sign bit clear.
- // Toggle the sign bit to do a signed comparison.
- (lhs ^ min_signed) <= (rhs ^ min_signed)
- };
// We have a subrange `niche_start..=niche_end` inside `range`.
// If the value of the tag is inside this subrange, it's a
@@ -291,49 +276,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// untagged_variant
// }
// However, we will likely be able to emit simpler code.
-
- // Find the least and greatest values in `range`, considered
- // both as signed and unsigned.
- let (low_unsigned, high_unsigned) = if range.start <= range.end {
- (range.start, range.end)
- } else {
- (0, max_unsigned)
- };
- let (low_signed, high_signed) = if sle(range.start, range.end) {
- (range.start, range.end)
- } else {
- (min_signed, max_signed)
- };
-
- let niches_ule = niche_start <= niche_end;
- let niches_sle = sle(niche_start, niche_end);
- let cast_smaller = cast_to_size <= tag_size;
-
- // In the algorithm above, we can change
- // cast(relative_tag) + niche_variants.start()
- // into
- // cast(tag + (niche_variants.start() - niche_start))
- // if either the casted type is no larger than the original
- // type, or if the niche values are contiguous (in either the
- // signed or unsigned sense).
- let can_incr = cast_smaller || niches_ule || niches_sle;
-
- let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> {
- if !can_incr {
- None
- } else if niche_start == low_unsigned {
- Some((IntPredicate::IntULE, niche_end))
- } else if niche_end == high_unsigned {
- Some((IntPredicate::IntUGE, niche_start))
- } else if niche_start == low_signed {
- Some((IntPredicate::IntSLE, niche_end))
- } else if niche_end == high_signed {
- Some((IntPredicate::IntSGE, niche_start))
- } else {
- None
- }
- };
-
let (is_niche, tagged_discr, delta) = if relative_max == 0 {
// Best case scenario: only one tagged variant. This will
// likely become just a comparison and a jump.
@@ -349,40 +291,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
let tagged_discr =
bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
(is_niche, tagged_discr, 0)
- } else if let Some((predicate, constant)) = data_for_boundary_niche() {
- // The niche values are either the lowest or the highest in
- // `range`. We can avoid the first subtraction in the
- // algorithm.
- // The algorithm is now this:
- // is_niche = tag <= niche_end
- // discr = if is_niche {
- // cast(tag + (niche_variants.start() - niche_start))
- // } else {
- // untagged_variant
- // }
- // (the first line may instead be tag >= niche_start,
- // and may be a signed or unsigned comparison)
- // The arithmetic must be done before the cast, so we can
- // have the correct wrapping behavior. See issue #104519 for
- // the consequences of getting this wrong.
- let is_niche =
- bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant));
- let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
- let incr_tag = if delta == 0 {
- tag
- } else {
- bx.add(tag, bx.cx().const_uint_big(tag_llty, delta))
- };
-
- let cast_tag = if cast_smaller {
- bx.intcast(incr_tag, cast_to, false)
- } else if niches_ule {
- bx.zext(incr_tag, cast_to)
- } else {
- bx.sext(incr_tag, cast_to)
- };
-
- (is_niche, cast_tag, 0)
} else {
// The special cases don't apply, so we'll have to go with
// the general algorithm.
@@ -558,6 +466,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bug!("using operand local {:?} as place", place_ref);
}
}
+ LocalRef::PendingOperand => {
+ bug!("using still-pending operand local {:?} as place", place_ref);
+ }
};
for elem in place_ref.projection[base..].iter() {
cg_base = match *elem {
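
With the boundary-niche special case removed, every multi-variant niche decode goes through the general algorithm described in the surviving comment. On plain integers it looks like this (a conceptual model; the real code emits backend ops over possibly-wrapping tags):

    // relative_tag = tag - niche_start; in the niche iff <= relative_max.
    fn decode_discr(
        tag: u128,
        niche_start: u128,
        relative_max: u128,      // niche_variants.end - niche_variants.start
        niche_variants_start: u128,
        untagged_variant: u128,
    ) -> u128 {
        let relative_tag = tag.wrapping_sub(niche_start);
        if relative_tag <= relative_max {
            relative_tag + niche_variants_start
        } else {
            untagged_variant
        }
    }

    fn main() {
        // Option<char>-like layout: None (variant 0) lives in the niche
        // just past char::MAX; any valid char value means Some (variant 1).
        assert_eq!(decode_discr(0x11_0000, 0x11_0000, 0, 0, 1), 0); // None
        assert_eq!(decode_discr('A' as u128, 0x11_0000, 0, 0, 1), 1); // Some
    }
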
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 3d856986f..d88226f5d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -10,10 +10,10 @@ use crate::MemFlags;
use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{self, FIRST_VARIANT};
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "trace", skip(self, bx))]
@@ -72,6 +72,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
+ mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
+ let src = self.codegen_operand(bx, operand);
+ self.codegen_transmute(bx, src, dest);
+ }
+
mir::Rvalue::Repeat(ref elem, count) => {
let cg_elem = self.codegen_operand(bx, elem);
@@ -113,21 +118,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let variant_dest = dest.project_downcast(bx, variant_index);
(variant_index, variant_dest, active_field_index)
}
- _ => (VariantIdx::from_u32(0), dest, None),
+ _ => (FIRST_VARIANT, dest, None),
};
if active_field_index.is_some() {
assert_eq!(operands.len(), 1);
}
- for (i, operand) in operands.iter().enumerate() {
+ for (i, operand) in operands.iter_enumerated() {
let op = self.codegen_operand(bx, operand);
                // Do not generate stores and GEPs for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let field = if let mir::AggregateKind::Array(_) = **kind {
- let llindex = bx.cx().const_usize(field_index as u64);
+ let llindex = bx.cx().const_usize(field_index.as_u32().into());
variant_dest.project_index(bx, llindex)
} else {
- variant_dest.project_field(bx, field_index)
+ variant_dest.project_field(bx, field_index.as_usize())
};
op.val.store(bx, field);
}
@@ -143,6 +148,156 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
+ fn codegen_transmute(
+ &mut self,
+ bx: &mut Bx,
+ src: OperandRef<'tcx, Bx::Value>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+ ) {
+ // The MIR validator enforces no unsized transmutes.
+ debug_assert!(src.layout.is_sized());
+ debug_assert!(dst.layout.is_sized());
+
+ if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
+ val.store(bx, dst);
+ return;
+ }
+
+ match src.val {
+ OperandValue::Ref(..) => {
+ span_bug!(
+ self.mir.span,
+ "Operand path should have handled transmute \
+ from `Ref` {src:?} to place {dst:?}"
+ );
+ }
+ OperandValue::Immediate(..) | OperandValue::Pair(..) => {
+ // When we have immediate(s), the alignment of the source is irrelevant,
+ // so we can store them using the destination's alignment.
+ let llty = bx.backend_type(src.layout);
+ let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+ src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+ }
+ }
+ }
+
+ /// Attempts to transmute an `OperandValue` to another `OperandValue`.
+ ///
+ /// Returns `None` for cases that can't work in that framework, such as for
+ /// `Immediate`->`Ref` that needs an `alloc` to get the location.
+ fn codegen_transmute_operand(
+ &mut self,
+ bx: &mut Bx,
+ operand: OperandRef<'tcx, Bx::Value>,
+ cast: TyAndLayout<'tcx>,
+ ) -> Option<OperandValue<Bx::Value>> {
+ // Check for transmutes that are always UB.
+ if operand.layout.size != cast.size
+ || operand.layout.abi.is_uninhabited()
+ || cast.abi.is_uninhabited()
+ {
+ if !operand.layout.abi.is_uninhabited() {
+ // Since this is known statically and the input could have existed
+ // without already having hit UB, might as well trap for it.
+ bx.abort();
+ }
+
+ // Because this transmute is UB, return something easy to generate,
+ // since it's fine that later uses of the value are probably UB.
+ return Some(OperandValue::poison(bx, cast));
+ }
+
+ let operand_kind = self.value_kind(operand.layout);
+ let cast_kind = self.value_kind(cast);
+
+ match operand.val {
+ OperandValue::Ref(ptr, meta, align) => {
+ debug_assert_eq!(meta, None);
+ debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
+ let cast_bty = bx.backend_type(cast);
+ let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
+ let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+ Some(bx.load_operand(fake_place).val)
+ }
+ OperandValue::Immediate(imm) => {
+ let OperandValueKind::Immediate(in_scalar) = operand_kind else {
+ bug!("Found {operand_kind:?} for operand {operand:?}");
+ };
+ if let OperandValueKind::Immediate(out_scalar) = cast_kind {
+ match (in_scalar, out_scalar) {
+ (ScalarOrZst::Zst, ScalarOrZst::Zst) => {
+ Some(OperandRef::new_zst(bx, cast).val)
+ }
+ (ScalarOrZst::Scalar(in_scalar), ScalarOrZst::Scalar(out_scalar))
+ if in_scalar.size(self.cx) == out_scalar.size(self.cx) =>
+ {
+ let cast_bty = bx.backend_type(cast);
+ Some(OperandValue::Immediate(
+ self.transmute_immediate(bx, imm, in_scalar, out_scalar, cast_bty),
+ ))
+ }
+ _ => None,
+ }
+ } else {
+ None
+ }
+ }
+ OperandValue::Pair(imm_a, imm_b) => {
+ let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
+ bug!("Found {operand_kind:?} for operand {operand:?}");
+ };
+ if let OperandValueKind::Pair(out_a, out_b) = cast_kind
+ && in_a.size(self.cx) == out_a.size(self.cx)
+ && in_b.size(self.cx) == out_b.size(self.cx)
+ {
+ let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
+ let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
+ Some(OperandValue::Pair(
+ self.transmute_immediate(bx, imm_a, in_a, out_a, out_a_ibty),
+ self.transmute_immediate(bx, imm_b, in_b, out_b, out_b_ibty),
+ ))
+ } else {
+ None
+ }
+ }
+ }
+ }
+
+ /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
+ /// or an [`OperandValue::Pair`] to an immediate of the target type.
+ ///
+ /// `to_backend_ty` must be the *non*-immediate backend type (so it will be
+    /// `i8`, not `i1`, for `bool`-like types).
+ fn transmute_immediate(
+ &self,
+ bx: &mut Bx,
+ mut imm: Bx::Value,
+ from_scalar: abi::Scalar,
+ to_scalar: abi::Scalar,
+ to_backend_ty: Bx::Type,
+ ) -> Bx::Value {
+ debug_assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
+
+ use abi::Primitive::*;
+ imm = bx.from_immediate(imm);
+ imm = match (from_scalar.primitive(), to_scalar.primitive()) {
+ (Int(..) | F32 | F64, Int(..) | F32 | F64) => bx.bitcast(imm, to_backend_ty),
+ (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
+ (Int(..), Pointer(..)) => bx.inttoptr(imm, to_backend_ty),
+ (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
+ (F32 | F64, Pointer(..)) => {
+ let int_imm = bx.bitcast(imm, bx.cx().type_isize());
+ bx.inttoptr(int_imm, to_backend_ty)
+ }
+ (Pointer(..), F32 | F64) => {
+ let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
+ bx.bitcast(int_imm, to_backend_ty)
+ }
+ };
+ imm = bx.to_immediate_scalar(imm, to_scalar);
+ imm
+ }
+
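
At the source level the arms of `transmute_immediate` line up with familiar same-size transmutes: int/float pairs are bitcasts, and pointer/int pairs are ptrtoint/inttoptr. A tiny stable-Rust demonstration of the same conversions (no compiler internals involved):

    fn main() {
        // Int/float of equal size: a register-level bitcast.
        let bits: u64 = f64::to_bits(1.0);
        assert_eq!(bits, 0x3FF0_0000_0000_0000);

        // Pointer <-> integer: the ptrtoint / inttoptr cases.
        let x = 5u8;
        let addr = &x as *const u8 as usize;
        let back = addr as *const u8;
        assert_eq!(unsafe { *back }, 5);
    }
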
pub fn codegen_rvalue_unsized(
&mut self,
bx: &mut Bx,
@@ -295,7 +450,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert!(bx.cx().is_backend_immediate(cast));
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
- let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+ let val = OperandValue::Immediate(bx.cx().const_poison(ll_t_out));
return OperandRef { val, layout: cast };
}
let r_t_in =
@@ -344,6 +499,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
OperandValue::Immediate(newval)
}
+ mir::CastKind::Transmute => {
+ self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
+ bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
+ })
+ }
};
OperandRef { val, layout: cast }
}
@@ -462,8 +622,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
- let static_ = bx.get_static(def_id);
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+ let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
+ {
+ let instance = ty::Instance {
+ def: ty::InstanceDef::ThreadLocalShim(def_id),
+ substs: ty::InternalSubsts::empty(),
+ };
+ let fn_ptr = bx.get_fn_addr(instance);
+ let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+ bx.call(fn_ty, Some(fn_abi), fn_ptr, &[], None)
+ } else {
+ bx.get_static(def_id)
+ };
OperandRef { val: OperandValue::Immediate(static_), layout }
}
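
The new `ThreadLocalRef` branch reaches a non-local thread-local through a generated `ThreadLocalShim`, a small function that returns the per-thread address, rather than referencing the TLS symbol directly across crates. The standard library's macro shows the same indirection in stable code:

    use std::cell::Cell;

    // `thread_local!` already hides the per-thread slot behind accessor
    // functions; that function-call indirection is essentially what the
    // ThreadLocalShim instance provides for `#[thread_local]` statics
    // defined in another crate.
    thread_local! {
        static COUNTER: Cell<u32> = Cell::new(0);
    }

    fn main() {
        COUNTER.with(|c| c.set(c.get() + 1));
        assert_eq!(COUNTER.with(|c| c.get()), 1);
    }
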
mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
@@ -491,7 +663,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// ZST are passed as operands and require special handling
            // because codegen_place() panics if a `Local` is an operand.
if let Some(index) = place.as_local() {
- if let LocalRef::Operand(Some(op)) = self.locals[index] {
+ if let LocalRef::Operand(op) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.kind() {
let n = n.eval_target_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
return bx.cx().const_usize(n);
@@ -663,17 +835,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
bx.checked_binop(oop, input_ty, lhs, rhs)
}
- mir::BinOp::Shl | mir::BinOp::Shr => {
- let lhs_llty = bx.cx().val_ty(lhs);
- let rhs_llty = bx.cx().val_ty(rhs);
- let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
- let outer_bits = bx.and(rhs, invert_mask);
-
- let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
- let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-
- (val, of)
- }
_ => bug!("Operator `{:?}` is not a checkable operator", op),
};
@@ -684,6 +845,31 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
match *rvalue {
+ mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
+ let operand_ty = operand.ty(self.mir, self.cx.tcx());
+ let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
+ let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
+
+ match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
+ // Can always load from a pointer as needed
+ (OperandValueKind::Ref, _) => true,
+
+                    // Need to generate an `alloca` to get a pointer from an immediate
+ (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,
+
+ // When we have scalar immediates, we can only convert things
+ // where the sizes match, to avoid endianness questions.
+ (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
+ a.size(self.cx) == b.size(self.cx),
+ (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
+ a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
+
+                    // Send mixed scalar/pair transmutes through the memory route
+ // FIXME: Maybe this could use insertvalue/extractvalue instead?
+ (OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
+ (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
+ }
+ }
mir::Rvalue::Ref(..) |
mir::Rvalue::CopyForDeref(..) |
mir::Rvalue::AddressOf(..) |
@@ -708,4 +894,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// (*) this is only true if the type is suitable
}
+
+ /// Gets which variant of [`OperandValue`] is expected for a particular type.
+ fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
+ if self.cx.is_backend_immediate(layout) {
+ debug_assert!(!self.cx.is_backend_scalar_pair(layout));
+ OperandValueKind::Immediate(match layout.abi {
+ abi::Abi::Scalar(s) => ScalarOrZst::Scalar(s),
+ abi::Abi::Vector { element, .. } => ScalarOrZst::Scalar(element),
+ _ if layout.is_zst() => ScalarOrZst::Zst,
+ x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
+ })
+ } else if self.cx.is_backend_scalar_pair(layout) {
+ let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
+ span_bug!(
+ self.mir.span,
+ "Couldn't translate {:?} as backend scalar pair",
+ layout.abi,
+ );
+ };
+ OperandValueKind::Pair(s1, s2)
+ } else {
+ OperandValueKind::Ref
+ }
+ }
+}
+
+/// The variants of this enum mirror those of [`OperandValue`], giving details
+/// about the backend values that each corresponding variant will hold.
+#[derive(Debug, Copy, Clone)]
+enum OperandValueKind {
+ Ref,
+ Immediate(ScalarOrZst),
+ Pair(abi::Scalar, abi::Scalar),
+}
+
+#[derive(Debug, Copy, Clone)]
+enum ScalarOrZst {
+ Zst,
+ Scalar(abi::Scalar),
+}
+
+impl ScalarOrZst {
+ pub fn size(self, cx: &impl abi::HasDataLayout) -> abi::Size {
+ match self {
+ ScalarOrZst::Zst => abi::Size::ZERO,
+ ScalarOrZst::Scalar(s) => s.size(cx),
+ }
+ }
}
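
The transmute arm of `rvalue_creates_operand` boils down to a small decision table over the two `OperandValueKind`s. Condensed to plain data (sizes here are illustrative stand-ins for `abi::Size`):

    #[derive(Clone, Copy)]
    enum Kind { Ref, Immediate(u64), Pair(u64, u64) } // payloads: sizes in bytes

    // Mirrors the match in rvalue_creates_operand above.
    fn transmute_stays_operand(from: Kind, to: Kind) -> bool {
        match (from, to) {
            (Kind::Ref, _) => true,                  // can always load as needed
            (_, Kind::Ref) => false,                 // would need an alloca
            (Kind::Immediate(a), Kind::Immediate(b)) => a == b,
            (Kind::Pair(a0, a1), Kind::Pair(b0, b1)) => a0 == b0 && a1 == b1,
            _ => false,                              // scalar <-> pair: via memory
        }
    }

    fn main() {
        assert!(transmute_stays_operand(Kind::Immediate(8), Kind::Immediate(8)));
        assert!(!transmute_stays_operand(Kind::Immediate(4), Kind::Pair(8, 8)));
    }
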
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 60fbceb34..3fd7397ad 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -18,12 +18,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::UnsizedPlace(cg_indirect_dest) => {
self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
}
- LocalRef::Operand(None) => {
+ LocalRef::PendingOperand => {
let operand = self.codegen_rvalue_operand(bx, rvalue);
- self.locals[index] = LocalRef::Operand(Some(operand));
+ self.locals[index] = LocalRef::Operand(operand);
self.debug_introduce_local(bx, index);
}
- LocalRef::Operand(Some(op)) => {
+ LocalRef::Operand(op) => {
if !op.layout.is_zst() {
span_bug!(
statement.source_info.span,
@@ -92,6 +92,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::ConstEvalCounter
+ | mir::StatementKind::PlaceMention(..)
| mir::StatementKind::Nop => {}
}
}
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
index e59fad99a..611dd3d1c 100644
--- a/compiler/rustc_codegen_ssa/src/target_features.rs
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -1,14 +1,14 @@
use rustc_ast::ast;
use rustc_attr::InstructionSetAttr;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::Applicability;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::ty::query::Providers;
-use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_middle::ty::TyCtxt;
use rustc_session::parse::feature_err;
use rustc_session::Session;
use rustc_span::symbol::sym;
@@ -192,7 +192,7 @@ const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("fxsr", None),
("gfni", Some(sym::avx512_target_feature)),
("lzcnt", None),
- ("movbe", Some(sym::movbe_target_feature)),
+ ("movbe", None),
("pclmulqdq", None),
("popcnt", None),
("rdrand", None),
@@ -251,6 +251,7 @@ const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("e", Some(sym::riscv_target_feature)),
("f", Some(sym::riscv_target_feature)),
("m", Some(sym::riscv_target_feature)),
+ ("relax", Some(sym::riscv_target_feature)),
("v", Some(sym::riscv_target_feature)),
("zba", Some(sym::riscv_target_feature)),
("zbb", Some(sym::riscv_target_feature)),
@@ -394,7 +395,6 @@ pub fn from_target_feature(
Some(sym::sse4a_target_feature) => rust_features.sse4a_target_feature,
Some(sym::tbm_target_feature) => rust_features.tbm_target_feature,
Some(sym::wasm_target_feature) => rust_features.wasm_target_feature,
- Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
@@ -418,7 +418,7 @@ pub fn from_target_feature(
/// Computes the set of target features used in a function for the purposes of
/// inline assembly.
-fn asm_target_features(tcx: TyCtxt<'_>, did: DefId) -> &FxHashSet<Symbol> {
+fn asm_target_features(tcx: TyCtxt<'_>, did: DefId) -> &FxIndexSet<Symbol> {
let mut target_features = tcx.sess.unstable_target_features.clone();
if tcx.def_kind(did).has_codegen_attrs() {
let attrs = tcx.codegen_fn_attrs(did);
@@ -442,7 +442,7 @@ fn asm_target_features(tcx: TyCtxt<'_>, did: DefId) -> &FxHashSet<Symbol> {
pub fn check_target_feature_trait_unsafe(tcx: TyCtxt<'_>, id: LocalDefId, attr_span: Span) {
if let DefKind::AssocFn = tcx.def_kind(id) {
let parent_id = tcx.local_parent(id);
- if let DefKind::Impl { of_trait: true } = tcx.def_kind(parent_id) {
+ if let DefKind::Trait | DefKind::Impl { of_trait: true } = tcx.def_kind(parent_id) {
tcx.sess
.struct_span_err(
attr_span,
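
The `FxHashSet` -> `FxIndexSet` change in this file is about determinism:
hash-set iteration order is unspecified, while an index set iterates in
insertion order, so the computed feature set is stable across runs. A small
demonstration using the `indexmap` crate that backs `FxIndexSet` (the crate
dependency and the feature names here are only for illustration):

    use std::collections::HashSet;
    use indexmap::IndexSet;

    fn main() {
        let features = ["avx", "sse2", "movbe", "lzcnt"];

        // HashSet: iteration order is unspecified and may differ per run.
        let hashed: HashSet<&str> = features.iter().copied().collect();
        println!("{hashed:?}");

        // IndexSet: iteration preserves insertion order, so the output is
        // always {"avx", "sse2", "movbe", "lzcnt"}.
        let indexed: IndexSet<&str> = features.iter().copied().collect();
        println!("{indexed:?}");
    }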
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
index fdc7a30e8..619063027 100644
--- a/compiler/rustc_codegen_ssa/src/traits/consts.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -8,6 +8,7 @@ pub trait ConstMethods<'tcx>: BackendTypes {
// Constant constructors
fn const_null(&self, t: Self::Type) -> Self::Value;
fn const_undef(&self, t: Self::Type) -> Self::Value;
+ fn const_poison(&self, t: Self::Type) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
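
The new `const_poison` hook lets a backend produce LLVM-style poison, which
is strictly "worse" than undef: any computation depending on it is undefined
behavior, giving optimizers more freedom than undef's arbitrary-but-usable
bits. A toy backend sketching where the method slots in (all names here are
stand-ins, not rustc or LLVM types):

    #[derive(Debug, Clone, Copy)]
    enum ToyValue {
        Undef,  // arbitrary bits; some uses are still permitted
        Poison, // any dependent use is immediate UB
    }

    struct ToyBackend;

    impl ToyBackend {
        fn const_undef(&self) -> ToyValue {
            ToyValue::Undef
        }
        fn const_poison(&self) -> ToyValue {
            ToyValue::Poison
        }
    }

    fn main() {
        let b = ToyBackend;
        println!("{:?} vs {:?}", b.const_undef(), b.const_poison());
    }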
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
index 109161ccc..32905b079 100644
--- a/compiler/rustc_codegen_ssa/src/traits/type_.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -100,11 +100,22 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
impl<'tcx, T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+    /// The backend type used for a Rust type when it's in memory,
+ /// such as when it's stack-allocated or when it's being loaded or stored.
fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+    /// The backend type used for a Rust type when it's in an SSA register.
+    ///
+    /// For nearly all types this is the same as [`Self::backend_type`]; however,
+ /// `bool` (and other `0`-or-`1` values) are kept as [`BaseTypeMethods::type_i1`]
+ /// in registers but as [`BaseTypeMethods::type_i8`] in memory.
+ ///
+ /// Converting values between the two different backend types is done using
+ /// [`from_immediate`](super::BuilderMethods::from_immediate) and
+ /// [`to_immediate_scalar`](super::BuilderMethods::to_immediate_scalar).
fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
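
As a concrete illustration of the memory/register split documented above:
for `bool`, `backend_type` is `i8` while `immediate_backend_type` is `i1`,
so stores must widen and loads must narrow. A rough model of the two
conversions (real backends emit zext/trunc instructions; the bit twiddling
below only mimics their effect):

    // Toy model: "memory" bools are a full byte, "register" bools a single
    // meaningful bit. Illustrative only.
    fn from_immediate(reg_bool: bool) -> u8 {
        // zext i1 -> i8: true becomes 1, false becomes 0.
        reg_bool as u8
    }

    fn to_immediate_scalar(mem_byte: u8) -> bool {
        // trunc i8 -> i1: only the low bit is meaningful for a valid bool.
        mem_byte & 1 != 0
    }

    fn main() {
        let stored = from_immediate(true);          // 1u8 goes to memory
        let reloaded = to_immediate_scalar(stored); // back to a register bool
        assert!(reloaded);
    }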