author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_codegen_llvm
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
download   rustc-ef24de24a82fe681581cc130f342363c47c0969a.tar.xz
           rustc-ef24de24a82fe681581cc130f342363c47c0969a.zip

Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--  compiler/rustc_codegen_llvm/Cargo.toml | 18
-rw-r--r--  compiler/rustc_codegen_llvm/messages.ftl | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 9
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 61
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 18
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 23
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 23
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 385
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 287
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 195
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 5
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 142
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 90
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 50
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 74
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 149
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 47
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 13
28 files changed, 849 insertions, 788 deletions
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index be09820d0..1d309eb90 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -7,17 +7,15 @@ edition = "2021"
test = false
[dependencies]
+# tidy-alphabetical-start
bitflags = "1.0"
cstr = "0.2"
+itertools = "0.10.5"
libc = "0.2"
measureme = "10.0.0"
-object = { version = "0.32.0", default-features = false, features = [
- "std",
- "read",
-] }
-tracing = "0.1"
-rustc_middle = { path = "../rustc_middle" }
+object = { version = "0.32.0", default-features = false, features = ["std", "read"] }
rustc-demangle = "0.1.21"
+rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
rustc_data_structures = { path = "../rustc_data_structures" }
@@ -29,12 +27,14 @@ rustc_index = { path = "../rustc_index" }
rustc_llvm = { path = "../rustc_llvm" }
rustc_macros = { path = "../rustc_macros" }
rustc_metadata = { path = "../rustc_metadata" }
+rustc_middle = { path = "../rustc_middle" }
rustc_query_system = { path = "../rustc_query_system" }
rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
-rustc_ast = { path = "../rustc_ast" }
-rustc_span = { path = "../rustc_span" }
serde = { version = "1", features = [ "derive" ]}
serde_json = "1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+# tidy-alphabetical-end
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index c0cfe39f1..7a86ddc75 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -76,8 +76,8 @@ codegen_llvm_target_machine = could not create LLVM TargetMachine for triple: {$
codegen_llvm_target_machine_with_llvm_err = could not create LLVM TargetMachine for triple: {$triple}: {$llvm_err}
codegen_llvm_unknown_ctarget_feature =
- unknown feature specified for `-Ctarget-feature`: `{$feature}`
- .note = it is still passed through to the codegen backend
+ unknown and unstable feature specified for `-Ctarget-feature`: `{$feature}`
+ .note = it is still passed through to the codegen backend, but use of this feature might be unsound and the behavior of this feature can change in the future
.possible_feature = you might have meant: `{$rust_feature}`
.consider_filing_feature_request = consider filing a feature request
@@ -87,6 +87,10 @@ codegen_llvm_unknown_ctarget_feature_prefix =
codegen_llvm_unknown_debuginfo_compression = unknown debuginfo compression algorithm {$algorithm} - will fall back to uncompressed debuginfo
+codegen_llvm_unstable_ctarget_feature =
+ unstable feature specified for `-Ctarget-feature`: `{$feature}`
+ .note = this feature is not stably supported; its behavior can change in the future
+
codegen_llvm_write_bytecode = failed to write bytecode to {$path}: {$err}
codegen_llvm_write_ir = failed to write LLVM IR to {$path}
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 9e834b83d..6e3a4cae2 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -362,9 +362,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// currently use this mode so we have to allow it -- but we absolutely
// shouldn't let any more targets do that.
// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+ //
+ // The unstable abi `PtxKernel` also uses Direct for now.
+ // It needs to switch to something else before stabilization can happen.
+ // (See issue: https://github.com/rust-lang/rust/issues/117271)
assert!(
- matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
- "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+ matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
+ || self.conv == Conv::PtxKernel,
+ "`PassMode::Direct` for aggregates only allowed on wasm and `extern \"ptx-kernel\"` fns\nProblematic type: {:#?}",
arg.layout,
);
}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index a3b0dc6b6..db297425b 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -19,7 +19,6 @@ use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
-use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, CrateType, Lto};
use std::ffi::{CStr, CString};
@@ -585,7 +584,6 @@ fn thin_lto(
copy_jobs.push(work_product);
info!(" - {}: re-used", module_name);
assert!(cgcx.incr_comp_session_dir.is_some());
- cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
continue;
}
}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index c778a6e01..9d5204034 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -259,9 +259,17 @@ pub fn target_machine_factory(
};
let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+ let should_prefer_remapped_for_split_debuginfo_paths =
+ sess.should_prefer_remapped_for_split_debuginfo_paths();
+
Arc::new(move |config: TargetMachineFactoryConfig| {
let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
- let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+ let path = path.unwrap_or_default();
+ let path = if should_prefer_remapped_for_split_debuginfo_paths {
+ path_mapping.map_prefix(path).0
+ } else {
+ path.into()
+ };
CString::new(path.to_str().unwrap()).unwrap()
};
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index ac6d8f841..7b259055d 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -3,6 +3,7 @@ use crate::attributes;
use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
+use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
@@ -1225,9 +1226,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
}
- fn do_not_inline(&mut self, llret: &'ll Value) {
- let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
- attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+ if llvm_util::get_version() < (17, 0, 2) {
+ // Work around https://github.com/llvm/llvm-project/issues/66984.
+ let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ } else {
+ // Cleanup is always the cold path.
+ let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+ }
}
}
@@ -1513,8 +1521,13 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
llfn: &'ll Value,
) {
let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
- if self.tcx.sess.is_sanitizer_cfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
- if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::CFI) {
+ if self.tcx.sess.is_sanitizer_cfi_enabled()
+ && let Some(fn_abi) = fn_abi
+ && is_indirect_call
+ {
+ if let Some(fn_attrs) = fn_attrs
+ && fn_attrs.no_sanitize.contains(SanitizerSet::CFI)
+ {
return;
}
@@ -1551,25 +1564,29 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
llfn: &'ll Value,
) -> Option<llvm::OperandBundleDef<'ll>> {
let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
- let kcfi_bundle =
- if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call {
- if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI) {
- return None;
- }
+ let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
+ && let Some(fn_abi) = fn_abi
+ && is_indirect_call
+ {
+ if let Some(fn_attrs) = fn_attrs
+ && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI)
+ {
+ return None;
+ }
- let mut options = TypeIdOptions::empty();
- if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
- options.insert(TypeIdOptions::GENERALIZE_POINTERS);
- }
- if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
- options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
- }
+ let mut options = TypeIdOptions::empty();
+ if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+ options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+ }
+ if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+ options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+ }
- let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
- Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
- } else {
- None
- };
+ let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+ Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+ } else {
+ None
+ };
kcfi_bundle
}
}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 5254c3f9c..d5778757c 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -46,8 +46,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
llfn
} else {
let instance_def_id = instance.def_id();
- let llfn = if tcx.sess.target.arch == "x86" &&
- let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
+ let llfn = if tcx.sess.target.arch == "x86"
+ && let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
{
// Fix for https://github.com/rust-lang/rust/issues/104453
// On x86 Windows, LLVM uses 'L' as the prefix for any private
@@ -60,8 +60,18 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
// LLVM will prefix the name with `__imp_`. Ideally, we'd like the
// existing logic below to set the Storage Class, but it has an
// exemption for MinGW for backwards compatability.
- let llfn = cx.declare_fn(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&tcx.sess.target), true), fn_abi, Some(instance));
- unsafe { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); }
+ let llfn = cx.declare_fn(
+ &common::i686_decorated_name(
+ &dllimport,
+ common::is_mingw_gnu_toolchain(&tcx.sess.target),
+ true,
+ ),
+ fn_abi,
+ Some(instance),
+ );
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+ }
llfn
} else {
cx.declare_fn(sym, fn_abi, Some(instance))
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 95af2f8ef..307c1264d 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -182,10 +182,17 @@ fn check_and_apply_linkage<'ll, 'tcx>(
llvm::LLVMSetInitializer(g2, g1);
g2
}
- } else if cx.tcx.sess.target.arch == "x86" &&
- let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
+ } else if cx.tcx.sess.target.arch == "x86"
+ && let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
{
- cx.declare_global(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true), llty)
+ cx.declare_global(
+ &common::i686_decorated_name(
+ &dllimport,
+ common::is_mingw_gnu_toolchain(&cx.tcx.sess.target),
+ true,
+ ),
+ llty,
+ )
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
@@ -367,15 +374,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
let g = self.get_static(def_id);
- // boolean SSA values are i1, but they have to be stored in i8 slots,
- // otherwise some LLVM optimization passes don't work as expected
- let mut val_llty = self.val_ty(v);
- let v = if val_llty == self.type_i1() {
- val_llty = self.type_i8();
- llvm::LLVMConstZExt(v, val_llty)
- } else {
- v
- };
+ let val_llty = self.val_ty(v);
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index b4b2ab1e1..4dd6372b5 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -26,8 +26,8 @@ use rustc_middle::{bug, span_bug};
use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
use rustc_session::Session;
-use rustc_span::source_map::Span;
use rustc_span::source_map::Spanned;
+use rustc_span::Span;
use rustc_target::abi::{
call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
};
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 763186a58..7ad2d03a5 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
+use rustc_middle::mir::coverage::{CounterId, CovTerm, ExpressionId};
/// Must match the layout of `LLVMRustCounterKind`.
#[derive(Copy, Clone, Debug)]
@@ -43,11 +43,11 @@ impl Counter {
Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
}
- pub(crate) fn from_operand(operand: Operand) -> Self {
- match operand {
- Operand::Zero => Self::ZERO,
- Operand::Counter(id) => Self::counter_value_reference(id),
- Operand::Expression(id) => Self::expression(id),
+ pub(crate) fn from_term(term: CovTerm) -> Self {
+ match term {
+ CovTerm::Zero => Self::ZERO,
+ CovTerm::Counter(id) => Self::counter_value_reference(id),
+ CovTerm::Expression(id) => Self::expression(id),
}
}
}
@@ -73,17 +73,6 @@ pub struct CounterExpression {
pub rhs: Counter,
}
-impl CounterExpression {
- /// The dummy expression `(0 - 0)` has a representation of all zeroes,
- /// making it marginally more efficient to initialize than `(0 + 0)`.
- pub(crate) const DUMMY: Self =
- Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
-
- pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
- Self { kind, lhs, rhs }
- }
-}
-
/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
///
/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index e83110dca..cd67fafb8 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,271 +1,270 @@
use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
+use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxIndexSet;
-use rustc_index::IndexVec;
-use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterId, CovTerm, Expression, ExpressionId, FunctionCoverageInfo, Mapping, Op,
+};
use rustc_middle::ty::Instance;
-use rustc_middle::ty::TyCtxt;
-
-#[derive(Clone, Debug, PartialEq)]
-pub struct Expression {
- lhs: Operand,
- op: Op,
- rhs: Operand,
- region: Option<CodeRegion>,
-}
+use rustc_span::Symbol;
-/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
-/// expressions (additions or subtraction), and (c) unreachable regions (always counted as zero),
-/// for a given Function. This struct also stores the `function_source_hash`,
-/// computed during instrumentation, and forwarded with counters.
-///
-/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
-/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
-/// or expression), but the line or lines in the gap region are not executable (such as lines with
-/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
-/// for a gap area is only used as the line execution count if there are no other regions on a
-/// line."
+/// Holds all of the coverage mapping data associated with a function instance,
+/// collected during traversal of `Coverage` statements in the function's MIR.
#[derive(Debug)]
-pub struct FunctionCoverage<'tcx> {
- instance: Instance<'tcx>,
- source_hash: u64,
+pub struct FunctionCoverageCollector<'tcx> {
+ /// Coverage info that was attached to this function by the instrumentor.
+ function_coverage_info: &'tcx FunctionCoverageInfo,
is_used: bool,
- counters: IndexVec<CounterId, Option<CodeRegion>>,
- expressions: IndexVec<ExpressionId, Option<Expression>>,
- unreachable_regions: Vec<CodeRegion>,
+
+ /// Tracks which counters have been seen, so that we can identify mappings
+ /// to counters that were optimized out, and set them to zero.
+ counters_seen: BitSet<CounterId>,
+ /// Contains all expression IDs that have been seen in an `ExpressionUsed`
+ /// coverage statement, plus all expression IDs that aren't directly used
+ /// by any mappings (and therefore do not have expression-used statements).
+ /// After MIR traversal is finished, we can conclude that any IDs missing
+ /// from this set must have had their statements deleted by MIR opts.
+ expressions_seen: BitSet<ExpressionId>,
}
-impl<'tcx> FunctionCoverage<'tcx> {
+impl<'tcx> FunctionCoverageCollector<'tcx> {
/// Creates a new set of coverage data for a used (called) function.
- pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
- Self::create(tcx, instance, true)
+ pub fn new(
+ instance: Instance<'tcx>,
+ function_coverage_info: &'tcx FunctionCoverageInfo,
+ ) -> Self {
+ Self::create(instance, function_coverage_info, true)
}
/// Creates a new set of coverage data for an unused (never called) function.
- pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
- Self::create(tcx, instance, false)
+ pub fn unused(
+ instance: Instance<'tcx>,
+ function_coverage_info: &'tcx FunctionCoverageInfo,
+ ) -> Self {
+ Self::create(instance, function_coverage_info, false)
}
- fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
- let coverageinfo = tcx.coverageinfo(instance.def);
+ fn create(
+ instance: Instance<'tcx>,
+ function_coverage_info: &'tcx FunctionCoverageInfo,
+ is_used: bool,
+ ) -> Self {
+ let num_counters = function_coverage_info.num_counters;
+ let num_expressions = function_coverage_info.expressions.len();
debug!(
- "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
- instance, coverageinfo, is_used
+ "FunctionCoverage::create(instance={instance:?}) has \
+ num_counters={num_counters}, num_expressions={num_expressions}, is_used={is_used}"
);
- Self {
- instance,
- source_hash: 0, // will be set with the first `add_counter()`
- is_used,
- counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
- expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
- unreachable_regions: Vec::new(),
- }
- }
-
- /// Returns true for a used (called) function, and false for an unused function.
- pub fn is_used(&self) -> bool {
- self.is_used
- }
- /// Sets the function source hash value. If called multiple times for the same function, all
- /// calls should have the same hash value.
- pub fn set_function_source_hash(&mut self, source_hash: u64) {
- if self.source_hash == 0 {
- self.source_hash = source_hash;
- } else {
- debug_assert_eq!(source_hash, self.source_hash);
+ // Create a filled set of expression IDs, so that expressions not
+ // directly used by mappings will be treated as "seen".
+ // (If they end up being unused, LLVM will delete them for us.)
+ let mut expressions_seen = BitSet::new_filled(num_expressions);
+ // For each expression ID that is directly used by one or more mappings,
+ // mark it as not-yet-seen. This indicates that we expect to see a
+ // corresponding `ExpressionUsed` statement during MIR traversal.
+ for Mapping { term, .. } in &function_coverage_info.mappings {
+ if let &CovTerm::Expression(id) = term {
+ expressions_seen.remove(id);
+ }
}
- }
- /// Adds a code region to be counted by an injected counter intrinsic.
- pub fn add_counter(&mut self, id: CounterId, region: CodeRegion) {
- if let Some(previous_region) = self.counters[id].replace(region.clone()) {
- assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ Self {
+ function_coverage_info,
+ is_used,
+ counters_seen: BitSet::new_empty(num_counters),
+ expressions_seen,
}
}
- /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
- /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity
- /// between operands that are counter IDs and operands that are expression IDs.
- pub fn add_counter_expression(
- &mut self,
- expression_id: ExpressionId,
- lhs: Operand,
- op: Op,
- rhs: Operand,
- region: Option<CodeRegion>,
- ) {
- debug!(
- "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
- expression_id, lhs, op, rhs, region
- );
- debug_assert!(
- expression_id.as_usize() < self.expressions.len(),
- "expression_id {} is out of range for expressions.len() = {}
- for {:?}",
- expression_id.as_usize(),
- self.expressions.len(),
- self,
- );
- if let Some(previous_expression) = self.expressions[expression_id].replace(Expression {
- lhs,
- op,
- rhs,
- region: region.clone(),
- }) {
- assert_eq!(
- previous_expression,
- Expression { lhs, op, rhs, region },
- "add_counter_expression: expression for id changed"
- );
- }
+ /// Marks a counter ID as having been seen in a counter-increment statement.
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn mark_counter_id_seen(&mut self, id: CounterId) {
+ self.counters_seen.insert(id);
}
- /// Add a region that will be marked as "unreachable", with a constant "zero counter".
- pub fn add_unreachable_region(&mut self, region: CodeRegion) {
- self.unreachable_regions.push(region)
+ /// Marks an expression ID as having been seen in an expression-used statement.
+ #[instrument(level = "debug", skip(self))]
+ pub(crate) fn mark_expression_id_seen(&mut self, id: ExpressionId) {
+ self.expressions_seen.insert(id);
}
- /// Perform some simplifications to make the final coverage mappings
- /// slightly smaller.
+ /// Identify expressions that will always have a value of zero, and note
+ /// their IDs in [`ZeroExpressions`]. Mappings that refer to a zero expression
+ /// can instead become mappings to a constant zero value.
///
/// This method mainly exists to preserve the simplifications that were
/// already being performed by the Rust-side expression renumbering, so that
/// the resulting coverage mappings don't get worse.
- pub(crate) fn simplify_expressions(&mut self) {
+ fn identify_zero_expressions(&self) -> ZeroExpressions {
// The set of expressions that either were optimized out entirely, or
// have zero as both of their operands, and will therefore always have
// a value of zero. Other expressions that refer to these as operands
- // can have those operands replaced with `Operand::Zero`.
- let mut zero_expressions = FxIndexSet::default();
+ // can have those operands replaced with `CovTerm::Zero`.
+ let mut zero_expressions = ZeroExpressions::default();
- // For each expression, perform simplifications based on lower-numbered
- // expressions, and then update the set of always-zero expressions if
- // necessary.
+ // Simplify a copy of each expression based on lower-numbered expressions,
+ // and then update the set of always-zero expressions if necessary.
// (By construction, expressions can only refer to other expressions
- // that have lower IDs, so one simplification pass is sufficient.)
- for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
- let Some(expression) = maybe_expression else {
- // If an expression is missing, it must have been optimized away,
+ // that have lower IDs, so one pass is sufficient.)
+ for (id, expression) in self.function_coverage_info.expressions.iter_enumerated() {
+ if !self.expressions_seen.contains(id) {
+ // If an expression was not seen, it must have been optimized away,
// so any operand that refers to it can be replaced with zero.
zero_expressions.insert(id);
continue;
+ }
+
+ // We don't need to simplify the actual expression data in the
+ // expressions list; we can just simplify a temporary copy and then
+ // use that to update the set of always-zero expressions.
+ let Expression { mut lhs, op, mut rhs } = *expression;
+
+ // If an expression has an operand that is also an expression, the
+ // operand's ID must be strictly lower. This is what lets us find
+ // all zero expressions in one pass.
+ let assert_operand_expression_is_lower = |operand_id: ExpressionId| {
+ assert!(
+ operand_id < id,
+ "Operand {operand_id:?} should be less than {id:?} in {expression:?}",
+ )
};
- // If an operand refers to an expression that is always zero, then
- // that operand can be replaced with `Operand::Zero`.
- let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
- Operand::Expression(id) if zero_expressions.contains(id) => {
- *operand = Operand::Zero;
+ // If an operand refers to a counter or expression that is always
+ // zero, then that operand can be replaced with `CovTerm::Zero`.
+ let maybe_set_operand_to_zero = |operand: &mut CovTerm| {
+ if let CovTerm::Expression(id) = *operand {
+ assert_operand_expression_is_lower(id);
+ }
+
+ if is_zero_term(&self.counters_seen, &zero_expressions, *operand) {
+ *operand = CovTerm::Zero;
}
- _ => (),
};
- maybe_set_operand_to_zero(&mut expression.lhs);
- maybe_set_operand_to_zero(&mut expression.rhs);
+ maybe_set_operand_to_zero(&mut lhs);
+ maybe_set_operand_to_zero(&mut rhs);
// Coverage counter values cannot be negative, so if an expression
// involves subtraction from zero, assume that its RHS must also be zero.
// (Do this after simplifications that could set the LHS to zero.)
- if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
- expression.rhs = Operand::Zero;
+ if lhs == CovTerm::Zero && op == Op::Subtract {
+ rhs = CovTerm::Zero;
}
// After the above simplifications, if both operands are zero, then
// we know that this expression is always zero too.
- if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+ if lhs == CovTerm::Zero && rhs == CovTerm::Zero {
zero_expressions.insert(id);
}
}
+
+ zero_expressions
}
- /// Return the source hash, generated from the HIR node structure, and used to indicate whether
- /// or not the source code structure changed between different compilations.
- pub fn source_hash(&self) -> u64 {
- self.source_hash
+ pub(crate) fn into_finished(self) -> FunctionCoverage<'tcx> {
+ let zero_expressions = self.identify_zero_expressions();
+ let FunctionCoverageCollector { function_coverage_info, is_used, counters_seen, .. } = self;
+
+ FunctionCoverage { function_coverage_info, is_used, counters_seen, zero_expressions }
}
+}
- /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
- /// associated `Regions` (from which the LLVM-specific `CoverageMapGenerator` will create
- /// `CounterMappingRegion`s.
- pub fn get_expressions_and_counter_regions(
- &self,
- ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
- assert!(
- self.source_hash != 0 || !self.is_used,
- "No counters provided the source_hash for used function: {:?}",
- self.instance
- );
+pub(crate) struct FunctionCoverage<'tcx> {
+ function_coverage_info: &'tcx FunctionCoverageInfo,
+ is_used: bool,
- let counter_expressions = self.counter_expressions();
- // Expression IDs are indices into `self.expressions`, and on the LLVM
- // side they will be treated as indices into `counter_expressions`, so
- // the two vectors should correspond 1:1.
- assert_eq!(self.expressions.len(), counter_expressions.len());
+ counters_seen: BitSet<CounterId>,
+ zero_expressions: ZeroExpressions,
+}
- let counter_regions = self.counter_regions();
- let expression_regions = self.expression_regions();
- let unreachable_regions = self.unreachable_regions();
+impl<'tcx> FunctionCoverage<'tcx> {
+ /// Returns true for a used (called) function, and false for an unused function.
+ pub(crate) fn is_used(&self) -> bool {
+ self.is_used
+ }
- let counter_regions =
- counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
- (counter_expressions, counter_regions)
+ /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+ /// or not the source code structure changed between different compilations.
+ pub fn source_hash(&self) -> u64 {
+ if self.is_used { self.function_coverage_info.function_source_hash } else { 0 }
}
- fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
- self.counters.iter_enumerated().filter_map(|(index, entry)| {
- // Option::map() will return None to filter out missing counters. This may happen
- // if, for example, a MIR-instrumented counter is removed during an optimization.
- entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
- })
+ /// Returns an iterator over all filenames used by this function's mappings.
+ pub(crate) fn all_file_names(&self) -> impl Iterator<Item = Symbol> + Captures<'_> {
+ self.function_coverage_info.mappings.iter().map(|mapping| mapping.code_region.file_name)
}
/// Convert this function's coverage expression data into a form that can be
/// passed through FFI to LLVM.
- fn counter_expressions(&self) -> Vec<CounterExpression> {
+ pub(crate) fn counter_expressions(
+ &self,
+ ) -> impl Iterator<Item = CounterExpression> + ExactSizeIterator + Captures<'_> {
// We know that LLVM will optimize out any unused expressions before
// producing the final coverage map, so there's no need to do the same
// thing on the Rust side unless we're confident we can do much better.
// (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
- self.expressions
- .iter()
- .map(|expression| match expression {
- None => {
- // This expression ID was allocated, but we never saw the
- // actual expression, so it must have been optimized out.
- // Replace it with a dummy expression, and let LLVM take
- // care of omitting it from the expression list.
- CounterExpression::DUMMY
- }
- &Some(Expression { lhs, op, rhs, .. }) => {
- // Convert the operands and operator as normal.
- CounterExpression::new(
- Counter::from_operand(lhs),
- match op {
- Op::Add => ExprKind::Add,
- Op::Subtract => ExprKind::Subtract,
- },
- Counter::from_operand(rhs),
- )
- }
- })
- .collect::<Vec<_>>()
+ self.function_coverage_info.expressions.iter().map(move |&Expression { lhs, op, rhs }| {
+ CounterExpression {
+ lhs: self.counter_for_term(lhs),
+ kind: match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ rhs: self.counter_for_term(rhs),
+ }
+ })
+ }
+
+ /// Converts this function's coverage mappings into an intermediate form
+ /// that will be used by `mapgen` when preparing for FFI.
+ pub(crate) fn counter_regions(
+ &self,
+ ) -> impl Iterator<Item = (Counter, &CodeRegion)> + ExactSizeIterator {
+ self.function_coverage_info.mappings.iter().map(move |mapping| {
+ let &Mapping { term, ref code_region } = mapping;
+ let counter = self.counter_for_term(term);
+ (counter, code_region)
+ })
}
- fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
- // Find all of the expression IDs that weren't optimized out AND have
- // an attached code region, and return the corresponding mapping as a
- // counter/region pair.
- self.expressions
- .iter_enumerated()
- .filter_map(|(id, expression)| {
- let code_region = expression.as_ref()?.region.as_ref()?;
- Some((Counter::expression(id), code_region))
- })
- .collect::<Vec<_>>()
+ fn counter_for_term(&self, term: CovTerm) -> Counter {
+ if is_zero_term(&self.counters_seen, &self.zero_expressions, term) {
+ Counter::ZERO
+ } else {
+ Counter::from_term(term)
+ }
}
+}
+
+/// Set of expression IDs that are known to always evaluate to zero.
+/// Any mapping or expression operand that refers to these expressions can have
+/// that reference replaced with a constant zero value.
+#[derive(Default)]
+struct ZeroExpressions(FxIndexSet<ExpressionId>);
+
+impl ZeroExpressions {
+ fn insert(&mut self, id: ExpressionId) {
+ self.0.insert(id);
+ }
+
+ fn contains(&self, id: ExpressionId) -> bool {
+ self.0.contains(&id)
+ }
+}
- fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
- self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
+/// Returns `true` if the given term is known to have a value of zero, taking
+/// into account knowledge of which counters are unused and which expressions
+/// are always zero.
+fn is_zero_term(
+ counters_seen: &BitSet<CounterId>,
+ zero_expressions: &ZeroExpressions,
+ term: CovTerm,
+) -> bool {
+ match term {
+ CovTerm::Zero => true,
+ CovTerm::Counter(id) => !counters_seen.contains(id),
+ CovTerm::Expression(id) => zero_expressions.contains(id),
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index d4e775256..274e0aeaa 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,18 +1,20 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
use crate::coverageinfo::ffi::CounterMappingRegion;
-use crate::coverageinfo::map_data::FunctionCoverage;
+use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector};
use crate::llvm;
-use rustc_codegen_ssa::traits::ConstMethods;
-use rustc_data_structures::fx::FxIndexSet;
+use itertools::Itertools as _;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods};
+use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::IndexVec;
use rustc_middle::bug;
-use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
use rustc_middle::mir::coverage::CodeRegion;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefIdSet;
use rustc_span::Symbol;
/// Generates and exports the Coverage Map.
@@ -56,21 +58,40 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
return;
}
- let mut global_file_table = GlobalFileTable::new(tcx);
+ let function_coverage_entries = function_coverage_map
+ .into_iter()
+ .map(|(instance, function_coverage)| (instance, function_coverage.into_finished()))
+ .collect::<Vec<_>>();
+
+ let all_file_names =
+ function_coverage_entries.iter().flat_map(|(_, fn_cov)| fn_cov.all_file_names());
+ let global_file_table = GlobalFileTable::new(all_file_names);
+
+ // Encode all filenames referenced by coverage mappings in this CGU.
+ let filenames_buffer = global_file_table.make_filenames_buffer(tcx);
+
+ let filenames_size = filenames_buffer.len();
+ let filenames_val = cx.const_bytes(&filenames_buffer);
+ let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
+
+ // Generate the coverage map header, which contains the filenames used by
+ // this CGU's coverage mappings, and store it in a well-known global.
+ let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
+ coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+
+ let mut unused_function_names = Vec::new();
+ let covfun_section_name = coverageinfo::covfun_section_name(cx);
// Encode coverage mappings and generate function records
- let mut function_data = Vec::new();
- for (instance, mut function_coverage) in function_coverage_map {
+ for (instance, function_coverage) in function_coverage_entries {
debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
- function_coverage.simplify_expressions();
- let function_coverage = function_coverage;
let mangled_function_name = tcx.symbol_name(instance).name;
let source_hash = function_coverage.source_hash();
let is_used = function_coverage.is_used();
let coverage_mapping_buffer =
- encode_mappings_for_function(&mut global_file_table, &function_coverage);
+ encode_mappings_for_function(&global_file_table, &function_coverage);
if coverage_mapping_buffer.is_empty() {
if function_coverage.is_used() {
@@ -84,21 +105,10 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
}
}
- function_data.push((mangled_function_name, source_hash, is_used, coverage_mapping_buffer));
- }
-
- // Encode all filenames referenced by counters/expressions in this module
- let filenames_buffer = global_file_table.into_filenames_buffer();
-
- let filenames_size = filenames_buffer.len();
- let filenames_val = cx.const_bytes(&filenames_buffer);
- let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
-
- // Generate the LLVM IR representation of the coverage map and store it in a well-known global
- let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
+ if !is_used {
+ unused_function_names.push(mangled_function_name);
+ }
- let covfun_section_name = coverageinfo::covfun_section_name(cx);
- for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
save_function_record(
cx,
&covfun_section_name,
@@ -110,90 +120,143 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
);
}
- // Save the coverage data value to LLVM IR
- coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+ // For unused functions, we need to take their mangled names and store them
+ // in a specially-named global array. LLVM's `InstrProfiling` pass will
+ // detect this global and include those names in its `__llvm_prf_names`
+ // section. (See `llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp`.)
+ if !unused_function_names.is_empty() {
+ assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
+
+ let name_globals = unused_function_names
+ .into_iter()
+ .map(|mangled_function_name| cx.const_str(mangled_function_name).0)
+ .collect::<Vec<_>>();
+ let initializer = cx.const_array(cx.type_ptr(), &name_globals);
+
+ let array = llvm::add_global(cx.llmod, cx.val_ty(initializer), "__llvm_coverage_names");
+ llvm::set_global_constant(array, true);
+ llvm::set_linkage(array, llvm::Linkage::InternalLinkage);
+ llvm::set_initializer(array, initializer);
+ }
}
+/// Maps "global" (per-CGU) file ID numbers to their underlying filenames.
struct GlobalFileTable {
- global_file_table: FxIndexSet<Symbol>,
+ /// This "raw" table doesn't include the working dir, so a filename's
+ /// global ID is its index in this set **plus one**.
+ raw_file_table: FxIndexSet<Symbol>,
}
impl GlobalFileTable {
- fn new(tcx: TyCtxt<'_>) -> Self {
- let mut global_file_table = FxIndexSet::default();
+ fn new(all_file_names: impl IntoIterator<Item = Symbol>) -> Self {
+ // Collect all of the filenames into a set. Filenames usually come in
+ // contiguous runs, so we can dedup adjacent ones to save work.
+ let mut raw_file_table = all_file_names.into_iter().dedup().collect::<FxIndexSet<Symbol>>();
+
+ // Sort the file table by its actual string values, not the arbitrary
+ // ordering of its symbols.
+ raw_file_table.sort_unstable_by(|a, b| a.as_str().cmp(b.as_str()));
+
+ Self { raw_file_table }
+ }
+
+ fn global_file_id_for_file_name(&self, file_name: Symbol) -> u32 {
+ let raw_id = self.raw_file_table.get_index_of(&file_name).unwrap_or_else(|| {
+ bug!("file name not found in prepared global file table: {file_name}");
+ });
+ // The raw file table doesn't include an entry for the working dir
+ // (which has ID 0), so add 1 to get the correct ID.
+ (raw_id + 1) as u32
+ }
+
+ fn make_filenames_buffer(&self, tcx: TyCtxt<'_>) -> Vec<u8> {
// LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
// requires setting the first filename to the compilation directory.
// Since rustc generates coverage maps with relative paths, the
// compilation directory can be combined with the relative paths
// to get absolute paths, if needed.
- let working_dir = Symbol::intern(
- &tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
- );
- global_file_table.insert(working_dir);
- Self { global_file_table }
- }
-
- fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
- let (global_file_id, _) = self.global_file_table.insert_full(file_name);
- global_file_id as u32
- }
-
- fn into_filenames_buffer(self) -> Vec<u8> {
- // This method takes `self` so that the caller can't accidentally
- // modify the original file table after encoding it into a buffer.
+ use rustc_session::RemapFileNameExt;
+ let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(&tcx.sess).to_string_lossy();
llvm::build_byte_buffer(|buffer| {
coverageinfo::write_filenames_section_to_buffer(
- self.global_file_table.iter().map(Symbol::as_str),
+ // Insert the working dir at index 0, before the other filenames.
+ std::iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str)),
buffer,
);
})
}
}
+rustc_index::newtype_index! {
+ // Tell the newtype macro to not generate `Encode`/`Decode` impls.
+ #[custom_encodable]
+ struct LocalFileId {}
+}
+
+/// Holds a mapping from "local" (per-function) file IDs to "global" (per-CGU)
+/// file IDs.
+#[derive(Default)]
+struct VirtualFileMapping {
+ local_to_global: IndexVec<LocalFileId, u32>,
+ global_to_local: FxIndexMap<u32, LocalFileId>,
+}
+
+impl VirtualFileMapping {
+ fn local_id_for_global(&mut self, global_file_id: u32) -> LocalFileId {
+ *self
+ .global_to_local
+ .entry(global_file_id)
+ .or_insert_with(|| self.local_to_global.push(global_file_id))
+ }
+
+ fn into_vec(self) -> Vec<u32> {
+ self.local_to_global.raw
+ }
+}
+
/// Using the expressions and counter regions collected for a single function,
/// generate the variable-sized payload of its corresponding `__llvm_covfun`
/// entry. The payload is returned as a vector of bytes.
///
/// Newly-encountered filenames will be added to the global file table.
fn encode_mappings_for_function(
- global_file_table: &mut GlobalFileTable,
+ global_file_table: &GlobalFileTable,
function_coverage: &FunctionCoverage<'_>,
) -> Vec<u8> {
- let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
-
- let mut counter_regions = counter_regions.collect::<Vec<_>>();
+ let counter_regions = function_coverage.counter_regions();
if counter_regions.is_empty() {
return Vec::new();
}
- let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+ let expressions = function_coverage.counter_expressions().collect::<Vec<_>>();
+
+ let mut virtual_file_mapping = VirtualFileMapping::default();
let mut mapping_regions = Vec::with_capacity(counter_regions.len());
- // Sort the list of (counter, region) mapping pairs by region, so that they
- // can be grouped by filename. Prepare file IDs for each filename, and
- // prepare the mapping data so that we can pass it through FFI to LLVM.
- counter_regions.sort_by_key(|(_counter, region)| *region);
- for counter_regions_for_file in
- counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+ // Group mappings into runs with the same filename, preserving the order
+ // yielded by `FunctionCoverage`.
+ // Prepare file IDs for each filename, and prepare the mapping data so that
+ // we can pass it through FFI to LLVM.
+ for (file_name, counter_regions_for_file) in
+ &counter_regions.group_by(|(_counter, region)| region.file_name)
{
- // Look up (or allocate) the global file ID for this filename.
- let file_name = counter_regions_for_file[0].1.file_name;
+ // Look up the global file ID for this filename.
let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
// Associate that global file ID with a local file ID for this function.
- let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
- debug!(" file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+ let local_file_id = virtual_file_mapping.local_id_for_global(global_file_id);
+ debug!(" file id: {local_file_id:?} => global {global_file_id} = '{file_name:?}'");
// For each counter/region pair in this function+file, convert it to a
// form suitable for FFI.
- for &(counter, region) in counter_regions_for_file {
+ for (counter, region) in counter_regions_for_file {
let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
debug!("Adding counter {counter:?} to map for {region:?}");
mapping_regions.push(CounterMappingRegion::code_region(
counter,
- local_file_id,
+ local_file_id.as_u32(),
start_line,
start_col,
end_line,
@@ -205,7 +268,7 @@ fn encode_mappings_for_function(
// Encode the function's coverage mappings into a buffer.
llvm::build_byte_buffer(|buffer| {
coverageinfo::write_mapping_to_buffer(
- virtual_file_mapping.raw,
+ virtual_file_mapping.into_vec(),
expressions,
mapping_regions,
buffer,
@@ -289,13 +352,12 @@ fn save_function_record(
/// `-Clink-dead-code` will not generate code for unused generic functions.)
///
/// We can find the unused functions (including generic functions) by the set difference of all MIR
-/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
-/// `codegened_and_inlined_items`).
+/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`codegenned_and_inlined_items`).
///
-/// These unused functions are then codegen'd in one of the CGUs which is marked as the
-/// "code coverage dead code cgu" during the partitioning process. This prevents us from generating
-/// code regions for the same function more than once which can lead to linker errors regarding
-/// duplicate symbols.
+/// These unused functions don't need to be codegenned, but we do need to add them to the function
+/// coverage map (in a single designated CGU) so that we still emit coverage mappings for them.
+/// We also end up adding their symbol names to a special global array that LLVM will include in
+/// its embedded coverage data.
fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
@@ -315,7 +377,7 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
// generic functions from consideration as well.
if !matches!(
kind,
- DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator
+ DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Coroutine
) {
return None;
}
@@ -326,21 +388,80 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
})
.collect();
- let codegenned_def_ids = tcx.codegened_and_inlined_items(());
+ let codegenned_def_ids = codegenned_and_inlined_items(tcx);
- for non_codegenned_def_id in
- eligible_def_ids.into_iter().filter(|id| !codegenned_def_ids.contains(id))
- {
- let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
-
- // If a function is marked `#[coverage(off)]`, then skip generating a
- // dead code stub for it.
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
- debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
+ // For each `DefId` that should have coverage instrumentation but wasn't
+ // codegenned, add it to the function coverage map as an unused function.
+ for def_id in eligible_def_ids.into_iter().filter(|id| !codegenned_def_ids.contains(id)) {
+ // Skip any function that didn't have coverage data added to it by the
+ // coverage instrumentor.
+ let body = tcx.instance_mir(ty::InstanceDef::Item(def_id));
+ let Some(function_coverage_info) = body.function_coverage_info.as_deref() else {
continue;
+ };
+
+ debug!("generating unused fn: {def_id:?}");
+ let instance = declare_unused_fn(tcx, def_id);
+ add_unused_function_coverage(cx, instance, function_coverage_info);
+ }
+}
+
+/// All items participating in code generation together with (instrumented)
+/// items inlined into them.
+fn codegenned_and_inlined_items(tcx: TyCtxt<'_>) -> DefIdSet {
+ let (items, cgus) = tcx.collect_and_partition_mono_items(());
+ let mut visited = DefIdSet::default();
+ let mut result = items.clone();
+
+ for cgu in cgus {
+ for item in cgu.items().keys() {
+ if let mir::mono::MonoItem::Fn(ref instance) = item {
+ let did = instance.def_id();
+ if !visited.insert(did) {
+ continue;
+ }
+ let body = tcx.instance_mir(instance.def);
+ for block in body.basic_blocks.iter() {
+ for statement in &block.statements {
+ let mir::StatementKind::Coverage(_) = statement.kind else { continue };
+ let scope = statement.source_info.scope;
+ if let Some(inlined) = scope.inlined_instance(&body.source_scopes) {
+ result.insert(inlined.def_id());
+ }
+ }
+ }
+ }
}
+ }
- debug!("generating unused fn: {:?}", non_codegenned_def_id);
- cx.define_unused_fn(non_codegenned_def_id);
+ result
+}
+
+fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tcx> {
+ ty::Instance::new(
+ def_id,
+ ty::GenericArgs::for_item(tcx, def_id, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ )
+}
+
+fn add_unused_function_coverage<'tcx>(
+ cx: &CodegenCx<'_, 'tcx>,
+ instance: ty::Instance<'tcx>,
+ function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo,
+) {
+ // An unused function's mappings will automatically be rewritten to map to
+ // zero, because none of its counters/expressions are marked as seen.
+ let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info);
+
+ if let Some(coverage_context) = cx.coverage_context() {
+ coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
+ } else {
+ bug!("Could not get the `coverage_context`");
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index c70cb670e..7d6975618 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -1,10 +1,9 @@
use crate::llvm;
-use crate::abi::Abi;
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
-use crate::coverageinfo::map_data::FunctionCoverage;
+use crate::coverageinfo::map_data::FunctionCoverageCollector;
use libc::c_uint;
use rustc_codegen_ssa::traits::{
@@ -12,17 +11,12 @@ use rustc_codegen_ssa::traits::{
StaticMethods,
};
use rustc_data_structures::fx::FxHashMap;
-use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
-use rustc_middle::mir::coverage::{CounterId, CoverageKind};
+use rustc_middle::mir::coverage::CoverageKind;
use rustc_middle::mir::Coverage;
-use rustc_middle::ty;
-use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_middle::ty::GenericArgs;
+use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::Instance;
-use rustc_middle::ty::Ty;
use std::cell::RefCell;
@@ -30,14 +24,13 @@ pub(crate) mod ffi;
pub(crate) mod map_data;
pub mod mapgen;
-const UNUSED_FUNCTION_COUNTER_ID: CounterId = CounterId::START;
-
const VAR_ALIGN_BYTES: usize = 8;
/// A context object for maintaining all state needed by the coverageinfo module.
pub struct CrateCoverageContext<'ll, 'tcx> {
/// Coverage data for each instrumented function identified by DefId.
- pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
+ pub(crate) function_coverage_map:
+ RefCell<FxHashMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>,
pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
}
@@ -49,7 +42,9 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
}
}
- pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
+ pub fn take_function_coverage_map(
+ &self,
+ ) -> FxHashMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>> {
self.function_coverage_map.replace(FxHashMap::default())
}
}
@@ -76,68 +71,56 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
bug!("Could not get the `coverage_context`");
}
}
-
- /// Functions with MIR-based coverage are normally codegenned _only_ if
- /// called. LLVM coverage tools typically expect every function to be
- /// defined (even if unused), with at least one call to LLVM intrinsic
- /// `instrprof.increment`.
- ///
- /// Codegen a small function that will never be called, with one counter
- /// that will never be incremented.
- ///
- /// For used/called functions, the coverageinfo was already added to the
- /// `function_coverage_map` (keyed by function `Instance`) during codegen.
- /// But in this case, since the unused function was _not_ previously
- /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
- /// them. The first `CodeRegion` is used to add a single counter, with the
- /// same counter ID used in the injected `instrprof.increment` intrinsic
- /// call. Since the function is never called, all other `CodeRegion`s can be
- /// added as `unreachable_region`s.
- fn define_unused_fn(&self, def_id: DefId) {
- let instance = declare_unused_fn(self, def_id);
- codegen_unused_fn_and_counter(self, instance);
- add_unused_function_coverage(self, instance, def_id);
- }
}
impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ #[instrument(level = "debug", skip(self))]
fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
+ // Our caller should have already taken care of inlining subtleties,
+ // so we can assume that counter/expression IDs in this coverage
+ // statement are meaningful for the given instance.
+ //
+ // (Either the statement was not inlined and directly belongs to this
+ // instance, or it was inlined *from* this instance.)
+
let bx = self;
+ let Some(function_coverage_info) =
+ bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
+ else {
+ debug!("function has a coverage statement but no coverage info");
+ return;
+ };
+
let Some(coverage_context) = bx.coverage_context() else { return };
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
let func_coverage = coverage_map
.entry(instance)
- .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+ .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
- let Coverage { kind, code_region } = coverage.clone();
- match kind {
- CoverageKind::Counter { function_source_hash, id } => {
- debug!(
- "ensuring function source hash is set for instance={:?}; function_source_hash={}",
- instance, function_source_hash,
- );
- func_coverage.set_function_source_hash(function_source_hash);
-
- if let Some(code_region) = code_region {
- // Note: Some counters do not have code regions, but may still be referenced
- // from expressions. In that case, don't add the counter to the coverage map,
- // but do inject the counter intrinsic.
- debug!(
- "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
- instance, id, code_region,
- );
- func_coverage.add_counter(id, code_region);
- }
+ let Coverage { kind } = coverage;
+ match *kind {
+ CoverageKind::CounterIncrement { id } => {
+ func_coverage.mark_counter_id_seen(id);
// We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
// as that needs an exclusive borrow.
drop(coverage_map);
- let coverageinfo = bx.tcx().coverageinfo(instance.def);
+ // The number of counters passed to `llvm.instrprof.increment` might
+ // be smaller than the number originally inserted by the instrumentor,
+ // if some high-numbered counters were removed by MIR optimizations.
+ // If so, LLVM's profiler runtime will use fewer physical counters.
+ let num_counters =
+ bx.tcx().coverage_ids_info(instance.def).max_counter_id.as_u32() + 1;
+ assert!(
+ num_counters as usize <= function_coverage_info.num_counters,
+ "num_counters disagreement: query says {num_counters} but function info only has {}",
+ function_coverage_info.num_counters
+ );
let fn_name = bx.get_pgo_func_name_var(instance);
- let hash = bx.const_u64(function_source_hash);
- let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let hash = bx.const_u64(function_coverage_info.function_source_hash);
+ let num_counters = bx.const_u32(num_counters);
let index = bx.const_u32(id.as_u32());
debug!(
"codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
@@ -145,105 +128,13 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
);
bx.instrprof_increment(fn_name, hash, num_counters, index);
}
- CoverageKind::Expression { id, lhs, op, rhs } => {
- debug!(
- "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
- instance, id, lhs, op, rhs, code_region,
- );
- func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
- }
- CoverageKind::Unreachable => {
- let code_region =
- code_region.expect("unreachable regions always have code regions");
- debug!(
- "adding unreachable code to coverage_map: instance={:?}, at {:?}",
- instance, code_region,
- );
- func_coverage.add_unreachable_region(code_region);
+ CoverageKind::ExpressionUsed { id } => {
+ func_coverage.mark_expression_id_seen(id);
}
}
}
}
-fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
- let tcx = cx.tcx;
-
- let instance = Instance::new(
- def_id,
- GenericArgs::for_item(tcx, def_id, |param, _| {
- if let ty::GenericParamDefKind::Lifetime = param.kind {
- tcx.lifetimes.re_erased.into()
- } else {
- tcx.mk_param_from_def(param)
- }
- }),
- );
-
- let llfn = cx.declare_fn(
- tcx.symbol_name(instance).name,
- cx.fn_abi_of_fn_ptr(
- ty::Binder::dummy(tcx.mk_fn_sig(
- [Ty::new_unit(tcx)],
- Ty::new_unit(tcx),
- false,
- hir::Unsafety::Unsafe,
- Abi::Rust,
- )),
- ty::List::empty(),
- ),
- None,
- );
-
- llvm::set_linkage(llfn, llvm::Linkage::PrivateLinkage);
- llvm::set_visibility(llfn, llvm::Visibility::Default);
-
- assert!(cx.instances.borrow_mut().insert(instance, llfn).is_none());
-
- instance
-}
-
-fn codegen_unused_fn_and_counter<'tcx>(cx: &CodegenCx<'_, 'tcx>, instance: Instance<'tcx>) {
- let llfn = cx.get_fn(instance);
- let llbb = Builder::append_block(cx, llfn, "unused_function");
- let mut bx = Builder::build(cx, llbb);
- let fn_name = bx.get_pgo_func_name_var(instance);
- let hash = bx.const_u64(0);
- let num_counters = bx.const_u32(1);
- let index = bx.const_u32(u32::from(UNUSED_FUNCTION_COUNTER_ID));
- debug!(
- "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?},
- index={:?}) for unused function: {:?}",
- fn_name, hash, num_counters, index, instance
- );
- bx.instrprof_increment(fn_name, hash, num_counters, index);
- bx.ret_void();
-}
-
-fn add_unused_function_coverage<'tcx>(
- cx: &CodegenCx<'_, 'tcx>,
- instance: Instance<'tcx>,
- def_id: DefId,
-) {
- let tcx = cx.tcx;
-
- let mut function_coverage = FunctionCoverage::unused(tcx, instance);
- for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
- if index == 0 {
- // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
- // definitions.
- function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
- } else {
- function_coverage.add_unreachable_region(code_region.clone());
- }
- }
-
- if let Some(coverage_context) = cx.coverage_context() {
- coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
- } else {
- bug!("Could not get the `coverage_context`");
- }
-}
-
/// Calls llvm::createPGOFuncNameVar() with the given function instance's
/// mangled function name. The LLVM API returns an llvm::GlobalVariable
/// containing the function name, with the specific variable name and linkage
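A minimal, self-contained sketch (plain Rust, not rustc internals) of the argument derivation in the `CounterIncrement` arm above: `llvm.instrprof.increment` receives the PGO name variable, the function source hash, the size of the physical counter table, and the index of the counter being bumped. The table size is `max_counter_id + 1` and, as the assertion in the hunk insists, never exceeds the count recorded by the MIR instrumentor. The names below are stand-ins for the queries mentioned in the diff.

    fn instrprof_increment_args(
        function_source_hash: u64,
        max_counter_id: u32,              // highest counter id that survived MIR opts
        instrumented_num_counters: usize, // `function_coverage_info.num_counters`
        counter_id: u32,                  // the counter being incremented
    ) -> (u64, u32, u32) {
        // Physical counters are numbered 0..=max_counter_id, so the table needs
        // max_counter_id + 1 slots; MIR opts may shrink it but never grow it.
        let num_counters = max_counter_id + 1;
        assert!(num_counters as usize <= instrumented_num_counters);
        (function_source_hash, num_counters, counter_id)
    }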
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index aff764f02..6a63eda4b 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -75,7 +75,10 @@ fn make_mir_scope<'ll, 'tcx>(
return;
};
- if let Some(vars) = variables && !vars.contains(scope) && scope_data.inlined.is_none() {
+ if let Some(vars) = variables
+ && !vars.contains(scope)
+ && scope_data.inlined.is_none()
+ {
// Do not create a DIScope if there are no variables defined in this
// MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
debug_context.scopes[scope] = parent_scope;
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index ed9387616..865bf01c8 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -335,12 +335,20 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
// This is actually a function pointer, so wrap it in pointer DI.
let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
+ let (size, align) = match fn_ty.kind() {
+ ty::FnDef(..) => (0, 1),
+ ty::FnPtr(..) => (
+ cx.tcx.data_layout.pointer_size.bits(),
+ cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+ ),
+ _ => unreachable!(),
+ };
let di_node = unsafe {
llvm::LLVMRustDIBuilderCreatePointerType(
DIB(cx),
fn_di_node,
- cx.tcx.data_layout.pointer_size.bits(),
- cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+ size,
+ align,
0, // Ignore DWARF address space.
name.as_ptr().cast(),
name.len(),
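The (0, 1) versus pointer-size split introduced above reflects the two function-like types: a `ty::FnDef` is a zero-sized function item, so its pointer-type DI wrapper gets size 0 and the minimal alignment, while a `ty::FnPtr` really is pointer-sized. A rough stand-alone illustration (the concrete pointer width comes from the target data layout; the argument values here are whatever the caller supplies):

    enum FnLikeKind { FnDef, FnPtr }

    /// Size and alignment, both in bits, for the pointer DI node that wraps a
    /// subroutine type: zero-sized for function items, pointer-sized for fn pointers.
    fn subroutine_ptr_size_align(
        kind: FnLikeKind,
        ptr_size_bits: u64,
        ptr_align_bits: u32,
    ) -> (u64, u32) {
        match kind {
            FnLikeKind::FnDef => (0, 1),
            FnLikeKind::FnPtr => (ptr_size_bits, ptr_align_bits),
        }
    }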
@@ -452,7 +460,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
}
ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id),
- ty::Generator(..) => enums::build_generator_di_node(cx, unique_type_id),
+ ty::Coroutine(..) => enums::build_coroutine_di_node(cx, unique_type_id),
ty::Adt(def, ..) => match def.adt_kind() {
AdtKind::Struct => build_struct_type_di_node(cx, unique_type_id),
AdtKind::Union => build_union_type_di_node(cx, unique_type_id),
@@ -539,48 +547,77 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) ->
) -> &'ll DIFile {
debug!(?source_file.name);
+ use rustc_session::RemapFileNameExt;
let (directory, file_name) = match &source_file.name {
FileName::Real(filename) => {
let working_directory = &cx.sess().opts.working_dir;
debug!(?working_directory);
- let filename = cx
- .sess()
- .source_map()
- .path_mapping()
- .to_embeddable_absolute_path(filename.clone(), working_directory);
-
- // Construct the absolute path of the file
- let abs_path = filename.remapped_path_if_available();
- debug!(?abs_path);
-
- if let Ok(rel_path) =
- abs_path.strip_prefix(working_directory.remapped_path_if_available())
- {
- // If the compiler's working directory (which also is the DW_AT_comp_dir of
- // the compilation unit) is a prefix of the path we are about to emit, then
- // only emit the part relative to the working directory.
- // Because of path remapping we sometimes see strange things here: `abs_path`
- // might actually look like a relative path
- // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
- // taking the working directory into account, downstream tooling will
- // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
- // which makes no sense. Usually in such cases the working directory will also
- // be remapped to `<crate-name-and-version>` or some other prefix of the path
- // we are remapping, so we end up with
- // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
- // By moving the working directory portion into the `directory` part of the
- // DIFile, we allow LLVM to emit just the relative path for DWARF, while
- // still emitting the correct absolute path for CodeView.
- (
- working_directory.to_string_lossy(FileNameDisplayPreference::Remapped),
- rel_path.to_string_lossy().into_owned(),
- )
+ if cx.sess().should_prefer_remapped_for_codegen() {
+ let filename = cx
+ .sess()
+ .source_map()
+ .path_mapping()
+ .to_embeddable_absolute_path(filename.clone(), working_directory);
+
+ // Construct the absolute path of the file
+ let abs_path = filename.remapped_path_if_available();
+ debug!(?abs_path);
+
+ if let Ok(rel_path) =
+ abs_path.strip_prefix(working_directory.remapped_path_if_available())
+ {
+ // If the compiler's working directory (which also is the DW_AT_comp_dir of
+ // the compilation unit) is a prefix of the path we are about to emit, then
+ // only emit the part relative to the working directory.
+ // Because of path remapping we sometimes see strange things here: `abs_path`
+ // might actually look like a relative path
+ // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
+ // taking the working directory into account, downstream tooling will
+ // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
+ // which makes no sense. Usually in such cases the working directory will also
+ // be remapped to `<crate-name-and-version>` or some other prefix of the path
+ // we are remapping, so we end up with
+ // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
+ // By moving the working directory portion into the `directory` part of the
+ // DIFile, we allow LLVM to emit just the relative path for DWARF, while
+ // still emitting the correct absolute path for CodeView.
+ (
+ working_directory.to_string_lossy(FileNameDisplayPreference::Remapped),
+ rel_path.to_string_lossy().into_owned(),
+ )
+ } else {
+ ("".into(), abs_path.to_string_lossy().into_owned())
+ }
} else {
- ("".into(), abs_path.to_string_lossy().into_owned())
+ let working_directory = working_directory.local_path_if_available();
+ let filename = filename.local_path_if_available();
+
+ debug!(?working_directory, ?filename);
+
+ let abs_path: Cow<'_, Path> = if filename.is_absolute() {
+ filename.into()
+ } else {
+ let mut p = PathBuf::new();
+ p.push(working_directory);
+ p.push(filename);
+ p.into()
+ };
+
+ if let Ok(rel_path) = abs_path.strip_prefix(working_directory) {
+ (
+ working_directory.to_string_lossy().into(),
+ rel_path.to_string_lossy().into_owned(),
+ )
+ } else {
+ ("".into(), abs_path.to_string_lossy().into_owned())
+ }
}
}
- other => ("".into(), other.prefer_remapped().to_string_lossy().into_owned()),
+ other => {
+ debug!(?other);
+ ("".into(), other.for_codegen(cx.sess()).to_string_lossy().into_owned())
+ }
};
let hash_kind = match source_file.src_hash.kind {
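Both branches of the new `if` above apply the same splitting rule, shown here as a small self-contained sketch: when the file's absolute path starts with the compilation working directory (which also becomes DW_AT_comp_dir), only the relative part goes into the DIFile name; otherwise the directory is left empty and the absolute path is emitted as the name.

    use std::path::Path;

    fn split_for_difile(working_directory: &Path, abs_path: &Path) -> (String, String) {
        match abs_path.strip_prefix(working_directory) {
            // Prefix matches: emit only the part relative to the working directory.
            Ok(rel_path) => (
                working_directory.to_string_lossy().into_owned(),
                rel_path.to_string_lossy().into_owned(),
            ),
            // No common prefix: empty directory, absolute path as the file name.
            Err(_) => (String::new(), abs_path.to_string_lossy().into_owned()),
        }
    }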
@@ -814,8 +851,9 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
// FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
let producer = format!("clang LLVM ({rustc_producer})");
+ use rustc_session::RemapFileNameExt;
let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
- let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
+ let work_dir = tcx.sess.opts.working_dir.for_codegen(&tcx.sess).to_string_lossy();
let flags = "\0";
let output_filenames = tcx.output_filenames(());
let split_name = if tcx.sess.target_can_use_split_dwarf() {
@@ -826,7 +864,13 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
Some(codegen_unit_name),
)
// We get a path relative to the working directory from split_dwarf_path
- .map(|f| tcx.sess.source_map().path_mapping().map_prefix(f).0)
+ .map(|f| {
+ if tcx.sess.should_prefer_remapped_for_split_debuginfo_paths() {
+ tcx.sess.source_map().path_mapping().map_prefix(f).0
+ } else {
+ f.into()
+ }
+ })
} else {
None
}
@@ -982,20 +1026,20 @@ fn build_struct_type_di_node<'ll, 'tcx>(
// Tuples
//=-----------------------------------------------------------------------------
-/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
-/// For a generator, this will handle upvars shared by all states.
+/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or coroutine.
+/// For a coroutine, this will handle upvars shared by all states.
fn build_upvar_field_di_nodes<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- closure_or_generator_ty: Ty<'tcx>,
- closure_or_generator_di_node: &'ll DIType,
+ closure_or_coroutine_ty: Ty<'tcx>,
+ closure_or_coroutine_di_node: &'ll DIType,
) -> SmallVec<&'ll DIType> {
- let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
- ty::Generator(def_id, args, _) => (def_id, args.as_generator().prefix_tys()),
+ let (&def_id, up_var_tys) = match closure_or_coroutine_ty.kind() {
+ ty::Coroutine(def_id, args, _) => (def_id, args.as_coroutine().prefix_tys()),
ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()),
_ => {
bug!(
- "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
- closure_or_generator_ty
+ "build_upvar_field_di_nodes() called with non-closure-or-coroutine-type: {:?}",
+ closure_or_coroutine_ty
)
}
};
@@ -1005,7 +1049,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
);
let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
- let layout = cx.layout_of(closure_or_generator_ty);
+ let layout = cx.layout_of(closure_or_coroutine_ty);
up_var_tys
.into_iter()
@@ -1014,7 +1058,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
.map(|(index, (up_var_ty, capture_name))| {
build_field_di_node(
cx,
- closure_or_generator_di_node,
+ closure_or_coroutine_di_node,
capture_name.as_str(),
cx.size_and_align_of(up_var_ty),
layout.fields.offset(index),
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 88040557a..ca7bfbeac 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -12,7 +12,7 @@ use rustc_middle::{
ty::{
self,
layout::{LayoutOf, TyAndLayout},
- AdtDef, GeneratorArgs, Ty,
+ AdtDef, CoroutineArgs, Ty,
},
};
use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
@@ -268,18 +268,18 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
)
}
-/// A generator debuginfo node looks the same as a that of an enum type.
+/// A coroutine debuginfo node looks the same as that of an enum type.
///
/// See [build_enum_type_di_node] for more information.
-pub(super) fn build_generator_di_node<'ll, 'tcx>(
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
- let generator_type = unique_type_id.expect_ty();
- let generator_type_and_layout = cx.layout_of(generator_type);
- let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+ let coroutine_type = unique_type_id.expect_ty();
+ let coroutine_type_and_layout = cx.layout_of(coroutine_type);
+ let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
- debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+ debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
type_map::build_type_with_children(
cx,
@@ -287,24 +287,24 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
cx,
type_map::Stub::Union,
unique_type_id,
- &generator_type_name,
- size_and_align_of(generator_type_and_layout),
+ &coroutine_type_name,
+ size_and_align_of(coroutine_type_and_layout),
NO_SCOPE_METADATA,
DIFlags::FlagZero,
),
- |cx, generator_type_di_node| match generator_type_and_layout.variants {
+ |cx, coroutine_type_di_node| match coroutine_type_and_layout.variants {
Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => {
- build_union_fields_for_direct_tag_generator(
+ build_union_fields_for_direct_tag_coroutine(
cx,
- generator_type_and_layout,
- generator_type_di_node,
+ coroutine_type_and_layout,
+ coroutine_type_di_node,
)
}
Variants::Single { .. }
| Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => {
bug!(
- "Encountered generator with non-direct-tag layout: {:?}",
- generator_type_and_layout
+ "Encountered coroutine with non-direct-tag layout: {:?}",
+ coroutine_type_and_layout
)
}
},
@@ -428,7 +428,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>(
})
.collect();
- build_union_fields_for_direct_tag_enum_or_generator(
+ build_union_fields_for_direct_tag_enum_or_coroutine(
cx,
enum_type_and_layout,
enum_type_di_node,
@@ -469,8 +469,8 @@ fn build_variant_names_type_di_node<'ll, 'tcx>(
fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
- enum_or_generator_type_di_node: &'ll DIType,
+ enum_or_coroutine_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_coroutine_type_di_node: &'ll DIType,
variant_index: VariantIdx,
untagged_variant_index: Option<VariantIdx>,
variant_struct_type_di_node: &'ll DIType,
@@ -486,13 +486,13 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
Stub::Struct,
UniqueTypeId::for_enum_variant_struct_type_wrapper(
cx.tcx,
- enum_or_generator_type_and_layout.ty,
+ enum_or_coroutine_type_and_layout.ty,
variant_index,
),
&variant_struct_wrapper_type_name(variant_index),
// NOTE: We use size and align of enum_type, not from variant_layout:
- size_and_align_of(enum_or_generator_type_and_layout),
- Some(enum_or_generator_type_di_node),
+ size_and_align_of(enum_or_coroutine_type_and_layout),
+ Some(enum_or_coroutine_type_di_node),
DIFlags::FlagZero,
),
|cx, wrapper_struct_type_di_node| {
@@ -535,7 +535,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
cx,
wrapper_struct_type_di_node,
"value",
- size_and_align_of(enum_or_generator_type_and_layout),
+ size_and_align_of(enum_or_coroutine_type_and_layout),
Size::ZERO,
DIFlags::FlagZero,
variant_struct_type_di_node,
@@ -662,40 +662,40 @@ fn split_128(value: u128) -> Split128 {
Split128 { hi: (value >> 64) as u64, lo: value as u64 }
}
-fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
+fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- generator_type_and_layout: TyAndLayout<'tcx>,
- generator_type_di_node: &'ll DIType,
+ coroutine_type_and_layout: TyAndLayout<'tcx>,
+ coroutine_type_di_node: &'ll DIType,
) -> SmallVec<&'ll DIType> {
let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } =
- generator_type_and_layout.variants
+ coroutine_type_and_layout.variants
else {
bug!("This function only supports layouts with directly encoded tags.")
};
- let (generator_def_id, generator_args) = match generator_type_and_layout.ty.kind() {
- &ty::Generator(def_id, args, _) => (def_id, args.as_generator()),
+ let (coroutine_def_id, coroutine_args) = match coroutine_type_and_layout.ty.kind() {
+ &ty::Coroutine(def_id, args, _) => (def_id, args.as_coroutine()),
_ => unreachable!(),
};
- let generator_layout = cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
+ let coroutine_layout = cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap();
- let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
- let variant_range = generator_args.variant_range(generator_def_id, cx.tcx);
+ let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id);
+ let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx);
let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
- let tag_base_type = tag_base_type(cx, generator_type_and_layout);
+ let tag_base_type = tag_base_type(cx, coroutine_type_and_layout);
let variant_names_type_di_node = build_variant_names_type_di_node(
cx,
- generator_type_di_node,
+ coroutine_type_di_node,
variant_range
.clone()
- .map(|variant_index| (variant_index, GeneratorArgs::variant_name(variant_index))),
+ .map(|variant_index| (variant_index, CoroutineArgs::variant_name(variant_index))),
);
let discriminants: IndexVec<VariantIdx, DiscrResult> = {
- let discriminants_iter = generator_args.discriminants(generator_def_id, cx.tcx);
+ let discriminants_iter = coroutine_args.discriminants(coroutine_def_id, cx.tcx);
let mut discriminants: IndexVec<VariantIdx, DiscrResult> =
IndexVec::with_capacity(variant_count);
for (variant_index, discr) in discriminants_iter {
@@ -709,16 +709,16 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
// Build the type node for each field.
let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
.map(|variant_index| {
- let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
+ let variant_struct_type_di_node = super::build_coroutine_variant_struct_type_di_node(
cx,
variant_index,
- generator_type_and_layout,
- generator_type_di_node,
- generator_layout,
+ coroutine_type_and_layout,
+ coroutine_type_di_node,
+ coroutine_layout,
&common_upvar_names,
);
- let span = generator_layout.variant_source_info[variant_index].span;
+ let span = coroutine_layout.variant_source_info[variant_index].span;
let source_info = if !span.is_dummy() {
let loc = cx.lookup_debug_loc(span.lo());
Some((file_metadata(cx, &loc.file), loc.line as c_uint))
@@ -735,10 +735,10 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
})
.collect();
- build_union_fields_for_direct_tag_enum_or_generator(
+ build_union_fields_for_direct_tag_enum_or_coroutine(
cx,
- generator_type_and_layout,
- generator_type_di_node,
+ coroutine_type_and_layout,
+ coroutine_type_di_node,
&variant_field_infos[..],
variant_names_type_di_node,
tag_base_type,
@@ -747,9 +747,9 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
)
}
-/// This is a helper function shared between enums and generators that makes sure fields have the
+/// This is a helper function shared between enums and coroutines that makes sure fields have the
/// expected names.
-fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
+fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
enum_type_and_layout: TyAndLayout<'tcx>,
enum_type_di_node: &'ll DIType,
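For orientation while reading this file's renames: the variant names fed into `build_variant_names_type_di_node` come from `CoroutineArgs::variant_name`. The sketch below mimics the naming scheme that function is generally understood to use (a few reserved states plus numbered suspend points); the exact strings are an assumption here, not something this diff states.

    // Assumed naming scheme, for illustration only.
    fn coroutine_variant_name(variant_index: u32) -> String {
        match variant_index {
            0 => "Unresumed".to_string(),     // never resumed yet
            1 => "Returned".to_string(),      // ran to completion
            2 => "Panicked".to_string(),      // poisoned by a panic
            n => format!("Suspend{}", n - 3), // one variant per suspension point
        }
    }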
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index d3239d5c3..df1df6d19 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -6,11 +6,11 @@ use rustc_hir::def::CtorKind;
use rustc_index::IndexSlice;
use rustc_middle::{
bug,
- mir::GeneratorLayout,
+ mir::CoroutineLayout,
ty::{
self,
layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
- AdtDef, GeneratorArgs, Ty, VariantDef,
+ AdtDef, CoroutineArgs, Ty, VariantDef,
},
};
use rustc_span::Symbol;
@@ -66,14 +66,14 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
}
}
-pub(super) fn build_generator_di_node<'ll, 'tcx>(
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
if cpp_like_debuginfo(cx.tcx) {
- cpp_like::build_generator_di_node(cx, unique_type_id)
+ cpp_like::build_coroutine_di_node(cx, unique_type_id)
} else {
- native::build_generator_di_node(cx, unique_type_id)
+ native::build_coroutine_di_node(cx, unique_type_id)
}
}
@@ -101,13 +101,13 @@ fn build_c_style_enum_di_node<'ll, 'tcx>(
}
}
-/// Extract the type with which we want to describe the tag of the given enum or generator.
+/// Extract the type with which we want to describe the tag of the given enum or coroutine.
fn tag_base_type<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
enum_type_and_layout: TyAndLayout<'tcx>,
) -> Ty<'tcx> {
debug_assert!(match enum_type_and_layout.ty.kind() {
- ty::Generator(..) => true,
+ ty::Coroutine(..) => true,
ty::Adt(adt_def, _) => adt_def.is_enum(),
_ => false,
});
@@ -300,8 +300,8 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
.di_node
}
-/// Build the struct type for describing a single generator state.
-/// See [build_generator_variant_struct_type_di_node].
+/// Build the struct type for describing a single coroutine state.
+/// See [build_coroutine_variant_struct_type_di_node].
///
/// ```txt
///
@@ -317,25 +317,25 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
/// ---> DW_TAG_structure_type (type of variant 3)
///
/// ```
-pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
+pub fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
variant_index: VariantIdx,
- generator_type_and_layout: TyAndLayout<'tcx>,
- generator_type_di_node: &'ll DIType,
- generator_layout: &GeneratorLayout<'tcx>,
+ coroutine_type_and_layout: TyAndLayout<'tcx>,
+ coroutine_type_di_node: &'ll DIType,
+ coroutine_layout: &CoroutineLayout<'tcx>,
common_upvar_names: &IndexSlice<FieldIdx, Symbol>,
) -> &'ll DIType {
- let variant_name = GeneratorArgs::variant_name(variant_index);
+ let variant_name = CoroutineArgs::variant_name(variant_index);
let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
cx.tcx,
- generator_type_and_layout.ty,
+ coroutine_type_and_layout.ty,
variant_index,
);
- let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
+ let variant_layout = coroutine_type_and_layout.for_variant(cx, variant_index);
- let generator_args = match generator_type_and_layout.ty.kind() {
- ty::Generator(_, args, _) => args.as_generator(),
+ let coroutine_args = match coroutine_type_and_layout.ty.kind() {
+ ty::Coroutine(_, args, _) => args.as_coroutine(),
_ => unreachable!(),
};
@@ -346,17 +346,17 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
Stub::Struct,
unique_type_id,
&variant_name,
- size_and_align_of(generator_type_and_layout),
- Some(generator_type_di_node),
+ size_and_align_of(coroutine_type_and_layout),
+ Some(coroutine_type_di_node),
DIFlags::FlagZero,
),
|cx, variant_struct_type_di_node| {
// Fields that just belong to this variant/state
let state_specific_fields: SmallVec<_> = (0..variant_layout.fields.count())
.map(|field_index| {
- let generator_saved_local = generator_layout.variant_fields[variant_index]
+ let coroutine_saved_local = coroutine_layout.variant_fields[variant_index]
[FieldIdx::from_usize(field_index)];
- let field_name_maybe = generator_layout.field_names[generator_saved_local];
+ let field_name_maybe = coroutine_layout.field_names[coroutine_saved_local];
let field_name = field_name_maybe
.as_ref()
.map(|s| Cow::from(s.as_str()))
@@ -377,7 +377,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
.collect();
// Fields that are common to all states
- let common_fields: SmallVec<_> = generator_args
+ let common_fields: SmallVec<_> = coroutine_args
.prefix_tys()
.iter()
.zip(common_upvar_names)
@@ -388,7 +388,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
variant_struct_type_di_node,
upvar_name.as_str(),
cx.size_and_align_of(upvar_ty),
- generator_type_and_layout.fields.offset(index),
+ coroutine_type_and_layout.fields.offset(index),
DIFlags::FlagZero,
type_di_node(cx, upvar_ty),
)
@@ -397,7 +397,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
state_specific_fields.into_iter().chain(common_fields.into_iter()).collect()
},
- |cx| build_generic_type_param_di_nodes(cx, generator_type_and_layout.ty),
+ |cx| build_generic_type_param_di_nodes(cx, coroutine_type_and_layout.ty),
)
.di_node
}
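Condensing the closure above: each coroutine state's struct lists the fields saved for that particular state first, named from the MIR `CoroutineLayout` or given a positional placeholder when unnamed (the `__{index}` form below is a guess, not taken from this diff), followed by the upvars shared by every state. A loose, self-contained model:

    fn variant_member_names(
        state_field_names: &[Option<&str>], // per-state saved locals, possibly unnamed
        common_upvar_names: &[&str],        // upvars shared by all states
    ) -> Vec<String> {
        state_field_names
            .iter()
            .copied()
            .enumerate()
            .map(|(i, name)| name.map(|s| s.to_owned()).unwrap_or_else(|| format!("__{i}")))
            .chain(common_upvar_names.iter().map(|s| s.to_string()))
            .collect()
    }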
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index feac40d8c..7eff52b85 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -110,12 +110,12 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
)
}
-/// Build the debuginfo node for a generator environment. It looks the same as the debuginfo for
+/// Build the debuginfo node for a coroutine environment. It looks the same as the debuginfo for
/// an enum. See [build_enum_type_di_node] for more information.
///
/// ```txt
///
-/// ---> DW_TAG_structure_type (top-level type for the generator)
+/// ---> DW_TAG_structure_type (top-level type for the coroutine)
/// DW_TAG_variant_part (variant part)
/// DW_AT_discr (reference to discriminant DW_TAG_member)
/// DW_TAG_member (discriminant member)
@@ -127,21 +127,21 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
/// DW_TAG_structure_type (type of variant 3)
///
/// ```
-pub(super) fn build_generator_di_node<'ll, 'tcx>(
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
- let generator_type = unique_type_id.expect_ty();
- let &ty::Generator(generator_def_id, _, _) = generator_type.kind() else {
- bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
+ let coroutine_type = unique_type_id.expect_ty();
+ let &ty::Coroutine(coroutine_def_id, _, _) = coroutine_type.kind() else {
+ bug!("build_coroutine_di_node() called with non-coroutine type: `{:?}`", coroutine_type)
};
- let containing_scope = get_namespace_for_item(cx, generator_def_id);
- let generator_type_and_layout = cx.layout_of(generator_type);
+ let containing_scope = get_namespace_for_item(cx, coroutine_def_id);
+ let coroutine_type_and_layout = cx.layout_of(coroutine_type);
- debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+ debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
- let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+ let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
type_map::build_type_with_children(
cx,
@@ -149,37 +149,37 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
cx,
Stub::Struct,
unique_type_id,
- &generator_type_name,
- size_and_align_of(generator_type_and_layout),
+ &coroutine_type_name,
+ size_and_align_of(coroutine_type_and_layout),
Some(containing_scope),
DIFlags::FlagZero,
),
- |cx, generator_type_di_node| {
- let generator_layout =
- cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
+ |cx, coroutine_type_di_node| {
+ let coroutine_layout =
+ cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap();
let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } =
- generator_type_and_layout.variants
+ coroutine_type_and_layout.variants
else {
bug!(
- "Encountered generator with non-direct-tag layout: {:?}",
- generator_type_and_layout
+ "Encountered coroutine with non-direct-tag layout: {:?}",
+ coroutine_type_and_layout
)
};
let common_upvar_names =
- cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
+ cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id);
// Build variant struct types
let variant_struct_type_di_nodes: SmallVec<_> = variants
.indices()
.map(|variant_index| {
// FIXME: This is problematic because just a number is not a valid identifier.
- // GeneratorArgs::variant_name(variant_index), would be consistent
+ // CoroutineArgs::variant_name(variant_index), would be consistent
// with enums?
let variant_name = format!("{}", variant_index.as_usize()).into();
- let span = generator_layout.variant_source_info[variant_index].span;
+ let span = coroutine_layout.variant_source_info[variant_index].span;
let source_info = if !span.is_dummy() {
let loc = cx.lookup_debug_loc(span.lo());
Some((file_metadata(cx, &loc.file), loc.line))
@@ -191,12 +191,12 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
variant_index,
variant_name,
variant_struct_type_di_node:
- super::build_generator_variant_struct_type_di_node(
+ super::build_coroutine_variant_struct_type_di_node(
cx,
variant_index,
- generator_type_and_layout,
- generator_type_di_node,
- generator_layout,
+ coroutine_type_and_layout,
+ coroutine_type_di_node,
+ coroutine_layout,
&common_upvar_names,
),
source_info,
@@ -206,18 +206,18 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
smallvec![build_enum_variant_part_di_node(
cx,
- generator_type_and_layout,
- generator_type_di_node,
+ coroutine_type_and_layout,
+ coroutine_type_di_node,
&variant_struct_type_di_nodes[..],
)]
},
- // We don't seem to be emitting generic args on the generator type, it seems. Rather
+        // We don't seem to be emitting generic args on the coroutine type. Rather
// they get attached to the struct type of each variant.
NO_GENERICS,
)
}
-/// Builds the DW_TAG_variant_part of an enum or generator debuginfo node:
+/// Builds the DW_TAG_variant_part of an enum or coroutine debuginfo node:
///
/// ```txt
/// DW_TAG_structure_type (top-level type for enum)
@@ -306,11 +306,11 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
/// ```
fn build_discr_member_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
- enum_or_generator_type_di_node: &'ll DIType,
+ enum_or_coroutine_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_coroutine_type_di_node: &'ll DIType,
) -> Option<&'ll DIType> {
- let tag_name = match enum_or_generator_type_and_layout.ty.kind() {
- ty::Generator(..) => "__state",
+ let tag_name = match enum_or_coroutine_type_and_layout.ty.kind() {
+ ty::Coroutine(..) => "__state",
_ => "",
};
@@ -320,14 +320,14 @@ fn build_discr_member_di_node<'ll, 'tcx>(
// In LLVM IR the wrong scope will be listed but when DWARF is
    // generated from it, the DW_TAG_member will be a child of the
// DW_TAG_variant_part.
- let containing_scope = enum_or_generator_type_di_node;
+ let containing_scope = enum_or_coroutine_type_di_node;
- match enum_or_generator_type_and_layout.layout.variants() {
+ match enum_or_coroutine_type_and_layout.layout.variants() {
// A single-variant enum has no discriminant.
&Variants::Single { .. } => None,
&Variants::Multiple { tag_field, .. } => {
- let tag_base_type = tag_base_type(cx, enum_or_generator_type_and_layout);
+ let tag_base_type = tag_base_type(cx, enum_or_coroutine_type_and_layout);
let (size, align) = cx.size_and_align_of(tag_base_type);
unsafe {
@@ -340,7 +340,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
UNKNOWN_LINE_NUMBER,
size.bits(),
align.bits() as u32,
- enum_or_generator_type_and_layout.fields.offset(tag_field).bits(),
+ enum_or_coroutine_type_and_layout.fields.offset(tag_field).bits(),
DIFlags::FlagArtificial,
type_di_node(cx, tag_base_type),
))
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index e30622cbd..1aec65cf9 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -43,7 +43,7 @@ pub(super) enum UniqueTypeId<'tcx> {
/// The ID of a regular type as it shows up at the language level.
Ty(Ty<'tcx>, private::HiddenZst),
/// The ID for the single DW_TAG_variant_part nested inside the top-level
- /// DW_TAG_structure_type that describes enums and generators.
+ /// DW_TAG_structure_type that describes enums and coroutines.
VariantPart(Ty<'tcx>, private::HiddenZst),
/// The ID for the artificial struct type describing a single enum variant.
VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 30cc9ea9b..4832b147a 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -50,7 +50,6 @@ mod utils;
pub use self::create_scope_map::compute_mir_scopes;
pub use self::metadata::build_global_var_di_node;
-pub use self::metadata::extend_scope_to_file;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
@@ -342,7 +341,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
// We look up the generics of the enclosing function and truncate the args
// to their length in order to cut off extra stuff that might be in there for
- // closures or generators.
+ // closures or coroutines.
let generics = tcx.generics_of(enclosing_fn_def_id);
let args = instance.args.truncate_to(tcx, generics);
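The truncation described in the comment above can be pictured with plain slices: a closure or coroutine instance carries its enclosing function's generic args plus extra synthetic ones, and debuginfo keeps only the leading portion matching the enclosing function's own generics. A minimal stand-in (not the real `truncate_to`):

    fn truncate_to_parent_generics<T>(instance_args: &[T], parent_generic_count: usize) -> &[T] {
        // Closures/coroutines append their synthetic args after the parent's
        // generics, so the prefix is exactly what debuginfo wants to keep.
        &instance_args[..parent_generic_count.min(instance_args.len())]
    }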
@@ -537,7 +536,9 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
// Only "class" methods are generally understood by LLVM,
// so avoid methods on other types (e.g., `<*mut T>::null`).
- if let ty::Adt(def, ..) = impl_self_ty.kind() && !def.is_box() {
+ if let ty::Adt(def, ..) = impl_self_ty.kind()
+ && !def.is_box()
+ {
// Again, only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == DebugInfo::Full && !impl_self_ty.has_param()
{
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index 665d19579..10ca5ad80 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -26,6 +26,13 @@ pub(crate) struct UnknownCTargetFeature<'a> {
pub rust_feature: PossibleFeature<'a>,
}
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unstable_ctarget_feature)]
+#[note]
+pub(crate) struct UnstableCTargetFeature<'a> {
+ pub feature: &'a str,
+}
+
#[derive(Subdiagnostic)]
pub(crate) enum PossibleFeature<'a> {
#[help(codegen_llvm_possible_feature)]
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a97b803fc..cc7e78b9c 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -935,9 +935,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
macro_rules! require_simd {
- ($ty: expr, $diag: expr) => {
- require!($ty.is_simd(), $diag)
- };
+ ($ty: expr, $variant:ident) => {{
+ require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
+ $ty.simd_size_and_type(bx.tcx())
+ }};
}
let tcx = bx.tcx();
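The reworked `require_simd!` above uses a generally handy `macro_rules!` shape: an arm whose body is a block (`{{ ... }}`) can perform a check that early-returns on failure and then evaluate to a value for the caller to bind, which is what lets the call sites below collapse into `let (len, elem) = require_simd!(...)`. A toy, self-contained version of the same shape (the struct and error type are stand-ins, not rustc's):

    struct MaybeVector { is_simd: bool, len: u64, elem: &'static str }

    macro_rules! require_simd {
        ($ty:expr, $what:expr) => {{
            if !$ty.is_simd {
                // rustc returns a structured InvalidMonomorphization error here.
                return Err(format!("{} must be a SIMD vector", $what));
            }
            ($ty.len, $ty.elem)
        }};
    }

    fn lanes_of(input: MaybeVector) -> Result<(u64, &'static str), String> {
        // The macro both validates and yields (len, element type) in one step.
        let (in_len, in_elem) = require_simd!(input, "input");
        Ok((in_len, in_elem))
    }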
@@ -946,12 +947,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
let arg_tys = sig.inputs();
if name == sym::simd_select_bitmask {
- require_simd!(
- arg_tys[1],
- InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
- );
-
- let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (len, _) = require_simd!(arg_tys[1], SimdArgument);
let expected_int_bits = (len.max(8) - 1).next_power_of_two();
let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
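A quick numeric check of the two context lines just above, runnable as-is: a 4-lane bitmask may be passed either as an integer of at least 8 bits (rounded up to a power of two) or as a byte array just large enough to hold the lanes.

    fn main() {
        let len: u64 = 4; // number of mask lanes
        let expected_int_bits = (len.max(8) - 1).next_power_of_two();
        let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
        assert_eq!(expected_int_bits, 8); // 4.max(8) - 1 = 7; next power of two is 8
        assert_eq!(expected_bytes, 1);    // 4 / 8 = 0, plus 1 for the remaining lanes
    }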
@@ -988,7 +984,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
// every intrinsic below takes a SIMD vector as its first argument
- require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
+ let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
let in_ty = arg_tys[0];
let comparison = match name {
@@ -1001,11 +997,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
_ => None,
};
- let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
if let Some(cmp_op) = comparison {
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-
- let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
@@ -1041,8 +1034,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
.unwrap_branch();
let n = idx.len() as u64;
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
require!(
out_len == n,
InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
@@ -1099,8 +1091,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}),
};
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
require!(
out_len == n,
InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
@@ -1179,11 +1170,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
- require_simd!(
- arg_tys[1],
- InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
- );
- let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -1401,20 +1388,16 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
- require_simd!(
- arg_tys[1],
- InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
- );
- require_simd!(
- arg_tys[2],
- InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
- );
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+ let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+ // The element type of the third argument must be a signed integer type of any width:
+ let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+ require_simd!(ret_ty, SimdReturn);
// Of the same length:
- let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
InvalidMonomorphization::SecondArgumentLength {
@@ -1444,11 +1427,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
);
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-
require!(
matches!(
element_ty1.kind(),
@@ -1465,20 +1443,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
- // The element type of the third argument must be a signed integer type of any width:
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
- require!(
- false,
- InvalidMonomorphization::ThirdArgElementType {
- span,
- name,
- expected_element: element_ty2,
- third_arg: arg_tys[2]
- }
- );
+ return_error!(InvalidMonomorphization::ThirdArgElementType {
+ span,
+ name,
+ expected_element: element_ty2,
+ third_arg: arg_tys[2]
+ });
}
}
@@ -1527,19 +1500,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
- require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
- require_simd!(
- arg_tys[1],
- InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
- );
- require_simd!(
- arg_tys[2],
- InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
- );
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+ let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+ let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
// Of the same length:
- let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
InvalidMonomorphization::SecondArgumentLength {
@@ -1563,12 +1530,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
);
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-
require!(
matches!(
element_ty1.kind(),
@@ -1590,15 +1551,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
- require!(
- false,
- InvalidMonomorphization::ThirdArgElementType {
- span,
- name,
- expected_element: element_ty2,
- third_arg: arg_tys[2]
- }
- );
+ return_error!(InvalidMonomorphization::ThirdArgElementType {
+ span,
+ name,
+ expected_element: element_ty2,
+ third_arg: arg_tys[2]
+ });
}
}
@@ -1794,8 +1752,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
bitwise_red!(simd_reduce_any: vector_reduce_or, true);
if name == sym::simd_cast_ptr {
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
@@ -1843,8 +1800,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
if name == sym::simd_expose_addr {
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
@@ -1872,8 +1828,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
if name == sym::simd_from_exposed_addr {
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
@@ -1901,8 +1856,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
if name == sym::simd_cast || name == sym::simd_as {
- require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
@@ -1989,17 +1943,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
_ => { /* Unsupported. Fallthrough. */ }
}
- require!(
- false,
- InvalidMonomorphization::UnsupportedCast {
- span,
- name,
- in_ty,
- in_elem,
- ret_ty,
- out_elem
- }
- );
+ return_error!(InvalidMonomorphization::UnsupportedCast {
+ span,
+ name,
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ });
}
macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
@@ -2010,8 +1961,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
})*
_ => {},
}
- require!(
- false,
+ return_error!(
InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
);
})*
@@ -2041,8 +1991,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
})*
_ => {},
}
- require!(
- false,
+ return_error!(
InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
);
})*
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 9c5edd6bd..a4e027012 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -4,13 +4,17 @@
//!
//! This API is completely unstable and subject to change.
+#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(exact_size_is_empty)]
#![feature(extern_types)]
#![feature(hash_raw_entry)]
#![feature(iter_intersperse)]
#![feature(let_chains)]
+#![feature(min_specialization)]
#![feature(never_type)]
-#![feature(slice_group_by)]
#![feature(impl_trait_in_assoc_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index a038b3af0..7fc02a95b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -969,7 +969,6 @@ extern "C" {
ConstantIndices: *const &'a Value,
NumIndices: c_uint,
) -> &'a Value;
- pub fn LLVMConstZExt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 7c8ef67ff..cc4ccaf19 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -1,7 +1,7 @@
use crate::back::write::create_informational_target_machine;
use crate::errors::{
PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature,
- UnknownCTargetFeaturePrefix,
+ UnknownCTargetFeaturePrefix, UnstableCTargetFeature,
};
use crate::llvm;
use libc::c_int;
@@ -531,25 +531,34 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
};
let feature = backend_feature_name(s)?;
- // Warn against use of LLVM specific feature names on the CLI.
- if diagnostics && !supported_features.iter().any(|&(v, _)| v == feature) {
- let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
- let llvm_features = to_llvm_features(sess, rust_feature);
- if llvm_features.contains(&feature) && !llvm_features.contains(&rust_feature) {
- Some(rust_feature)
+ // Warn against use of LLVM specific feature names and unstable features on the CLI.
+ if diagnostics {
+ let feature_state = supported_features.iter().find(|&&(v, _)| v == feature);
+ if feature_state.is_none() {
+ let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
+ let llvm_features = to_llvm_features(sess, rust_feature);
+ if llvm_features.contains(&feature)
+ && !llvm_features.contains(&rust_feature)
+ {
+ Some(rust_feature)
+ } else {
+ None
+ }
+ });
+ let unknown_feature = if let Some(rust_feature) = rust_feature {
+ UnknownCTargetFeature {
+ feature,
+ rust_feature: PossibleFeature::Some { rust_feature },
+ }
} else {
- None
- }
- });
- let unknown_feature = if let Some(rust_feature) = rust_feature {
- UnknownCTargetFeature {
- feature,
- rust_feature: PossibleFeature::Some { rust_feature },
- }
- } else {
- UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
- };
- sess.emit_warning(unknown_feature);
+ UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
+ };
+ sess.emit_warning(unknown_feature);
+ } else if feature_state.is_some_and(|(_name, feature_gate)| feature_gate.is_some())
+ {
+ // An unstable feature. Warn about using it.
+ sess.emit_warning(UnstableCTargetFeature { feature });
+ }
}
if diagnostics {
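Condensed model of the new diagnostic flow in the hunk above: a feature name that Rust does not recognize warns as unknown (with a suggested Rust name when one maps to the same LLVM feature, computed separately), while a recognized but feature-gated name now warns as unstable instead of passing silently. Types below are stand-ins for the session and diagnostic structs.

    enum FeatureWarning<'a> {
        Unknown { feature: &'a str, suggestion: Option<&'a str> },
        Unstable { feature: &'a str },
    }

    fn classify<'a>(
        feature: &'a str,
        supported: &[(&'a str, Option<&'a str>)], // (rust name, feature gate)
        suggestion: Option<&'a str>,              // precomputed rust-name suggestion
    ) -> Option<FeatureWarning<'a>> {
        match supported.iter().find(|&&(name, _)| name == feature) {
            None => Some(FeatureWarning::Unknown { feature, suggestion }),
            Some(&(_, gate)) if gate.is_some() => Some(FeatureWarning::Unstable { feature }),
            Some(_) => None, // stable, supported feature: no warning
        }
    }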
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 38e822056..01e823396 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -6,7 +6,6 @@ use crate::llvm;
use crate::type_of::LayoutLlvmExt;
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-pub use rustc_middle::mir::mono::MonoItem;
use rustc_middle::mir::mono::{Linkage, Visibility};
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::{self, Instance, TypeVisitableExt};
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 8db6195d9..06b770367 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -112,7 +112,7 @@ impl<'ll> CodegenCx<'ll, '_> {
}
}
- /// Return a LLVM type that has at most the required alignment,
+ /// Return an LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
let unit = Integer::approximate_align(self, align);
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index dcc62d314..712b6ed53 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -42,7 +42,7 @@ fn uncached_llvm_type<'a, 'tcx>(
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
- ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Coroutine(..) | ty::Str
// For performance reasons we use names only when emitting LLVM IR.
if !cx.sess().fewer_names() =>
{
@@ -54,10 +54,10 @@ fn uncached_llvm_type<'a, 'tcx>(
write!(&mut name, "::{}", def.variant(index).name).unwrap();
}
}
- if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ if let (&ty::Coroutine(_, _, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- write!(&mut name, "::{}", ty::GeneratorArgs::variant_name(index)).unwrap();
+ write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
}
Some(name)
}
@@ -397,7 +397,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
// extracts all the individual values.
let ety = element.llvm_type(cx);
- return Some(cx.type_vector(ety, *count));
+ if *count == 1 {
+ // Emitting `<1 x T>` would be silly; just use the scalar.
+ return Some(ety);
+ } else {
+ return Some(cx.type_vector(ety, *count));
+ }
}
// FIXME: The above only handled integer arrays; surely more things