summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_codegen_llvm
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 03:57:31 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 03:57:31 +0000
commitdc0db358abe19481e475e10c32149b53370f1a1c (patch)
treeab8ce99c4b255ce46f99ef402c27916055b899ee /compiler/rustc_codegen_llvm
parentReleasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
downloadrustc-dc0db358abe19481e475e10c32149b53370f1a1c.tar.xz
rustc-dc0db358abe19481e475e10c32149b53370f1a1c.zip
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--compiler/rustc_codegen_llvm/messages.ftl8
-rw-r--r--compiler/rustc_codegen_llvm/src/abi.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/attributes.rs8
-rw-r--r--compiler/rustc_codegen_llvm/src/back/lto.rs10
-rw-r--r--compiler/rustc_codegen_llvm/src/back/write.rs62
-rw-r--r--compiler/rustc_codegen_llvm/src/builder.rs45
-rw-r--r--compiler/rustc_codegen_llvm/src/common.rs53
-rw-r--r--compiler/rustc_codegen_llvm/src/consts.rs19
-rw-r--r--compiler/rustc_codegen_llvm/src/context.rs44
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs89
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs348
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs82
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs8
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs17
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs12
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs13
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/mod.rs6
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/utils.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/errors.rs12
-rw-r--r--compiler/rustc_codegen_llvm/src/intrinsic.rs114
-rw-r--r--compiler/rustc_codegen_llvm/src/lib.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/ffi.rs20
-rw-r--r--compiler/rustc_codegen_llvm/src/type_.rs7
-rw-r--r--compiler/rustc_codegen_llvm/src/type_of.rs45
-rw-r--r--compiler/rustc_codegen_llvm/src/va_arg.rs2
28 files changed, 876 insertions, 168 deletions
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index 55622fdb2..de1622951 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -20,8 +20,12 @@ codegen_llvm_error_writing_def_file =
codegen_llvm_from_llvm_diag = {$message}
codegen_llvm_from_llvm_optimization_diag = {$filename}:{$line}:{$column} {$pass_name} ({$kind}): {$message}
-codegen_llvm_invalid_minimum_alignment =
- invalid minimum global alignment: {$err}
+
+codegen_llvm_invalid_minimum_alignment_not_power_of_two =
+ invalid minimum global alignment: {$align} is not power of 2
+
+codegen_llvm_invalid_minimum_alignment_too_large =
+ invalid minimum global alignment: {$align} is too large
codegen_llvm_load_bitcode = failed to load bitcode of module "{$name}"
codegen_llvm_load_bitcode_with_llvm_err = failed to load bitcode of module "{$name}": {$llvm_err}
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 28be6d033..d221bad28 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -351,7 +351,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
continue;
}
PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
- let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+ let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 651d644eb..39275272e 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -1,7 +1,6 @@
//! Set and unset common attributes on LLVM values.
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::small_str::SmallStr;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::{self, TyCtxt};
@@ -88,6 +87,9 @@ pub fn sanitize_attrs<'ll>(
attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
}
+ if enabled.contains(SanitizerSet::SAFESTACK) {
+ attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
+ }
attrs
}
@@ -478,8 +480,8 @@ pub fn from_fn_attrs<'ll, 'tcx>(
let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
let function_features = function_features.iter().map(|s| s.as_str());
- let target_features =
- global_features.chain(function_features).intersperse(",").collect::<SmallStr<1024>>();
+ let target_features: String =
+ global_features.chain(function_features).intersperse(",").collect();
if !target_features.is_empty() {
to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 604f68eb6..d7dd98d79 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,4 +1,4 @@
-use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
+use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
use crate::errors::{
DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
};
@@ -302,7 +302,13 @@ fn fat_lto(
// The linking steps below may produce errors and diagnostics within LLVM
// which we'd like to handle and print, so set up our diagnostic handlers
// (which get unregistered when they go out of scope below).
- let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+ let _handler = DiagnosticHandlers::new(
+ cgcx,
+ diag_handler,
+ llcx,
+ &module,
+ CodegenDiagnosticsStage::LTO,
+ );
// For all other modules we codegened we'll need to link them into our own
// bitcode. All modules were codegened in their own LLVM context, however,
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index ca2eab28f..0f5e97544 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -268,6 +268,16 @@ pub(crate) fn save_temp_bitcode(
}
}
+/// In what context is a dignostic handler being attached to a codegen unit?
+pub enum CodegenDiagnosticsStage {
+ /// Prelink optimization stage.
+ Opt,
+ /// LTO/ThinLTO postlink optimization stage.
+ LTO,
+ /// Code generation.
+ Codegen,
+}
+
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
@@ -279,6 +289,8 @@ impl<'a> DiagnosticHandlers<'a> {
cgcx: &'a CodegenContext<LlvmCodegenBackend>,
handler: &'a Handler,
llcx: &'a llvm::Context,
+ module: &ModuleCodegen<ModuleLlvm>,
+ stage: CodegenDiagnosticsStage,
) -> Self {
let remark_passes_all: bool;
let remark_passes: Vec<CString>;
@@ -295,6 +307,20 @@ impl<'a> DiagnosticHandlers<'a> {
};
let remark_passes: Vec<*const c_char> =
remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+ let remark_file = cgcx
+ .remark_dir
+ .as_ref()
+ // Use the .opt.yaml file suffix, which is supported by LLVM's opt-viewer.
+ .map(|dir| {
+ let stage_suffix = match stage {
+ CodegenDiagnosticsStage::Codegen => "codegen",
+ CodegenDiagnosticsStage::Opt => "opt",
+ CodegenDiagnosticsStage::LTO => "lto",
+ };
+ dir.join(format!("{}.{stage_suffix}.opt.yaml", module.name))
+ })
+ .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
+
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
@@ -305,6 +331,9 @@ impl<'a> DiagnosticHandlers<'a> {
remark_passes_all,
remark_passes.as_ptr(),
remark_passes.len(),
+ // The `as_ref()` is important here, otherwise the `CString` will be dropped
+ // too soon!
+ remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
);
DiagnosticHandlers { data, llcx, old_handler }
}
@@ -523,7 +552,8 @@ pub(crate) unsafe fn optimize(
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
- let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+ let _handlers =
+ DiagnosticHandlers::new(cgcx, diag_handler, llcx, module, CodegenDiagnosticsStage::Opt);
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
@@ -582,7 +612,13 @@ pub(crate) unsafe fn codegen(
let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
- let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+ let _handlers = DiagnosticHandlers::new(
+ cgcx,
+ diag_handler,
+ llcx,
+ &module,
+ CodegenDiagnosticsStage::Codegen,
+ );
if cgcx.msvc_imps_needed {
create_msvc_imps(cgcx, llcx, llmod);
@@ -775,7 +811,6 @@ pub(crate) unsafe fn codegen(
}
record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
- drop(handlers);
}
// `.dwo` files are only emitted if:
@@ -875,14 +910,19 @@ unsafe fn embed_bitcode(
// passed though then these sections will show up in the final output.
// Additionally the flag that we need to set here is `SHF_EXCLUDE`.
//
+ // * XCOFF - AIX linker ignores content in .ipa and .info if no auxiliary
+ // symbol associated with these sections.
+ //
// Unfortunately, LLVM provides no way to set custom section flags. For ELF
// and COFF we emit the sections using module level inline assembly for that
// reason (see issue #90326 for historical background).
+ let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
|| cgcx.opts.target_triple.triple().contains("-darwin")
|| cgcx.opts.target_triple.triple().contains("-tvos")
|| cgcx.opts.target_triple.triple().contains("-watchos");
if is_apple
+ || is_aix
|| cgcx.opts.target_triple.triple().starts_with("wasm")
|| cgcx.opts.target_triple.triple().starts_with("asmjs")
{
@@ -895,7 +935,13 @@ unsafe fn embed_bitcode(
);
llvm::LLVMSetInitializer(llglobal, llconst);
- let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+ let section = if is_apple {
+ "__LLVM,__bitcode\0"
+ } else if is_aix {
+ ".ipa\0"
+ } else {
+ ".llvmbc\0"
+ };
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@@ -907,7 +953,13 @@ unsafe fn embed_bitcode(
"rustc.embedded.cmdline\0".as_ptr().cast(),
);
llvm::LLVMSetInitializer(llglobal, llconst);
- let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+ let section = if is_apple {
+ "__LLVM,__cmdline\0"
+ } else if is_aix {
+ ".info\0"
+ } else {
+ ".llvmcmd\0"
+ };
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
} else {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 4d0bcd53d..d55992bf0 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -24,6 +24,7 @@ use rustc_span::Span;
use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
+use smallvec::SmallVec;
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter;
@@ -230,7 +231,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let args = self.check_call("invoke", llty, llfn, args);
let funclet_bundle = funclet.map(|funclet| funclet.bundle());
let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
- let mut bundles = vec![funclet_bundle];
+ let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+ if let Some(funclet_bundle) = funclet_bundle {
+ bundles.push(funclet_bundle);
+ }
// Emit CFI pointer type membership test
self.cfi_type_test(fn_attrs, fn_abi, llfn);
@@ -238,9 +242,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// Emit KCFI operand bundle
let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
- bundles.push(kcfi_bundle);
+ if let Some(kcfi_bundle) = kcfi_bundle {
+ bundles.push(kcfi_bundle);
+ }
- bundles.retain(|bundle| bundle.is_some());
let invoke = unsafe {
llvm::LLVMRustBuildInvoke(
self.llbuilder,
@@ -486,7 +491,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
if place.layout.is_zst() {
- return OperandRef::new_zst(self, place.layout);
+ return OperandRef::zero_sized(place.layout);
}
#[instrument(level = "trace", skip(bx))]
@@ -577,8 +582,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
) {
let zero = self.const_usize(0);
let count = self.const_usize(count);
- let start = dest.project_index(self, zero).llval;
- let end = dest.project_index(self, count).llval;
let header_bb = self.append_sibling_block("repeat_loop_header");
let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -587,24 +590,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
self.br(header_bb);
let mut header_bx = Self::build(self.cx, header_bb);
- let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+ let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);
- let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+ let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
header_bx.cond_br(keep_going, body_bb, next_bb);
let mut body_bx = Self::build(self.cx, body_bb);
- let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
- cg_elem
- .val
- .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
-
- let next = body_bx.inbounds_gep(
- self.backend_type(cg_elem.layout),
- current,
- &[self.const_usize(1)],
- );
+ let dest_elem = dest.project_index(&mut body_bx, i);
+ cg_elem.val.store(&mut body_bx, dest_elem);
+
+ let next = body_bx.unchecked_uadd(i, self.const_usize(1));
body_bx.br(header_bb);
- header_bx.add_incoming_to_phi(current, next, body_bb);
+ header_bx.add_incoming_to_phi(i, next, body_bb);
*self = Self::build(self.cx, next_bb);
}
@@ -1197,7 +1194,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let args = self.check_call("call", llty, llfn, args);
let funclet_bundle = funclet.map(|funclet| funclet.bundle());
let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
- let mut bundles = vec![funclet_bundle];
+ let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+ if let Some(funclet_bundle) = funclet_bundle {
+ bundles.push(funclet_bundle);
+ }
// Emit CFI pointer type membership test
self.cfi_type_test(fn_attrs, fn_abi, llfn);
@@ -1205,9 +1205,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// Emit KCFI operand bundle
let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
- bundles.push(kcfi_bundle);
+ if let Some(kcfi_bundle) = kcfi_bundle {
+ bundles.push(kcfi_bundle);
+ }
- bundles.retain(|bundle| bundle.is_some());
let call = unsafe {
llvm::LLVMRustBuildCall(
self.llbuilder,
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 9127fba38..a2db59bd6 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -8,16 +8,15 @@ use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_ast::Mutability;
-use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
-use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
use rustc_target::spec::Target;
use libc::{c_char, c_uint};
@@ -169,6 +168,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.const_uint(self.type_i64(), i)
}
+ fn const_u128(&self, i: u128) -> &'ll Value {
+ self.const_uint_big(self.type_i128(), i)
+ }
+
fn const_usize(&self, i: u64) -> &'ll Value {
let bit_size = self.data_layout().pointer_size.bits();
if bit_size < 64 {
@@ -307,38 +310,24 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
const_alloc_to_llvm(self, alloc)
}
- fn from_const_alloc(
- &self,
- layout: TyAndLayout<'tcx>,
- alloc: ConstAllocation<'tcx>,
- offset: Size,
- ) -> PlaceRef<'tcx, &'ll Value> {
- let alloc_align = alloc.inner().align;
- assert_eq!(alloc_align, layout.align.abi);
- let llty = self.type_ptr_to(layout.llvm_type(self));
- let llval = if layout.size == Size::ZERO {
- let llval = self.const_usize(alloc_align.bytes());
- unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
- } else {
- let init = const_alloc_to_llvm(self, alloc);
- let base_addr = self.static_addr_of(init, alloc_align, None);
-
- let llval = unsafe {
- llvm::LLVMRustConstInBoundsGEP2(
- self.type_i8(),
- self.const_bitcast(base_addr, self.type_i8p()),
- &self.const_usize(offset.bytes()),
- 1,
- )
- };
- self.const_bitcast(llval, llty)
- };
- PlaceRef::new_sized(llval, layout)
- }
-
fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
consts::ptrcast(val, ty)
}
+
+ fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ self.const_bitcast(val, ty)
+ }
+
+ fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
+ unsafe {
+ llvm::LLVMRustConstInBoundsGEP2(
+ self.type_i8(),
+ self.const_bitcast(base_addr, self.type_i8p()),
+ &self.const_usize(offset.bytes()),
+ 1,
+ )
+ }
+ }
}
/// Get the [LLVM type][Type] of a [`Value`].
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 940358acd..df52f50f8 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -1,7 +1,9 @@
use crate::base;
use crate::common::{self, CodegenCx};
use crate::debuginfo;
-use crate::errors::{InvalidMinimumAlignment, SymbolAlreadyDefined};
+use crate::errors::{
+ InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined,
+};
use crate::llvm::{self, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
@@ -19,7 +21,9 @@ use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_session::config::Lto;
-use rustc_target::abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
+use rustc_target::abi::{
+ Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
use std::ops::Range;
pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
@@ -129,9 +133,14 @@ fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align:
if let Some(min) = cx.sess().target.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(min),
- Err(err) => {
- cx.sess().emit_err(InvalidMinimumAlignment { err });
- }
+ Err(err) => match err {
+ AlignFromBytesError::NotPowerOfTwo(align) => {
+ cx.sess().emit_err(InvalidMinimumAlignmentNotPowerOfTwo { align });
+ }
+ AlignFromBytesError::TooLarge(align) => {
+ cx.sess().emit_err(InvalidMinimumAlignmentTooLarge { align });
+ }
+ },
}
}
unsafe {
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 83101a854..e1e0a4428 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -9,7 +9,7 @@ use crate::type_::Type;
use crate::value::Value;
use cstr::cstr;
-use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
@@ -528,19 +528,28 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality;
}
+
+ let name = if wants_msvc_seh(self.sess()) {
+ Some("__CxxFrameHandler3")
+ } else if wants_wasm_eh(self.sess()) {
+ // LLVM specifically tests for the name of the personality function
+ // There is no need for this function to exist anywhere, it will
+ // not be called. However, its name has to be "__gxx_wasm_personality_v0"
+ // for native wasm exceptions.
+ Some("__gxx_wasm_personality_v0")
+ } else {
+ None
+ };
+
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
- Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ Some(def_id) if name.is_none() => self.get_fn_addr(
ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
.unwrap()
.unwrap(),
),
_ => {
- let name = if wants_msvc_seh(self.sess()) {
- "__CxxFrameHandler3"
- } else {
- "rust_eh_personality"
- };
+ let name = name.unwrap_or("rust_eh_personality");
if let Some(llfn) = self.get_declared_value(name) {
llfn
} else {
@@ -658,6 +667,10 @@ impl<'ll> CodegenCx<'ll, '_> {
let t_f32 = self.type_f32();
let t_f64 = self.type_f64();
let t_metadata = self.type_metadata();
+ let t_token = self.type_token();
+
+ ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+ ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
@@ -969,9 +982,9 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
if let LayoutError::SizeOverflow(_) = err {
- self.sess().emit_fatal(Spanned { span, node: err })
+ self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
} else {
- span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
}
}
}
@@ -991,21 +1004,12 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
} else {
match fn_abi_request {
FnAbiRequest::OfFnPtr { sig, extra_args } => {
- span_bug!(
- span,
- "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
- sig,
- extra_args,
- err
- );
+ span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}",);
}
FnAbiRequest::OfInstance { instance, extra_args } => {
span_bug!(
span,
- "`fn_abi_of_instance({}, {:?})` failed: {}",
- instance,
- extra_args,
- err
+ "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
);
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
new file mode 100644
index 000000000..1791ce4b3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -0,0 +1,89 @@
+use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+
+/// Must match the layout of `LLVMRustCounterKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+ Zero = 0,
+ CounterValueReference = 1,
+ Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+/// * For `CounterKind::Zero`, `id` is assumed to be `0`
+/// * For `CounterKind::CounterValueReference`, `id` matches the `counter_id` of the injected
+/// instrumentation counter (the `index` argument to the LLVM intrinsic
+/// `instrprof.increment()`)
+/// * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+/// counter expressions.
+///
+/// Corresponds to struct `llvm::coverage::Counter`.
+///
+/// Must match the layout of `LLVMRustCounter`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+ // Important: The layout (order and types of fields) must match its C++ counterpart.
+ pub kind: CounterKind,
+ id: u32,
+}
+
+impl Counter {
+ /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
+ /// `id` is not used.
+ pub fn zero() -> Self {
+ Self { kind: CounterKind::Zero, id: 0 }
+ }
+
+ /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
+ /// the given 1-based counter_id to the required 0-based equivalent for
+ /// the `Counter` encoding.
+ pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
+ Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+ }
+
+ /// Constructs a new `Counter` of kind `Expression`.
+ pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
+ Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+ }
+
+ /// Returns true if the `Counter` kind is `Zero`.
+ pub fn is_zero(&self) -> bool {
+ matches!(self.kind, CounterKind::Zero)
+ }
+
+ /// An explicitly-named function to get the ID value, making it more obvious
+ /// that the stored value is now 0-based.
+ pub fn zero_based_id(&self) -> u32 {
+ debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
+ self.id
+ }
+}
+
+/// Corresponds to enum `llvm::coverage::CounterExpression::ExprKind`.
+///
+/// Must match the layout of `LLVMRustCounterExprKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+ Subtract = 0,
+ Add = 1,
+}
+
+/// Corresponds to struct `llvm::coverage::CounterExpression`.
+///
+/// Must match the layout of `LLVMRustCounterExpression`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+ pub kind: ExprKind,
+ pub lhs: Counter,
+ pub rhs: Counter,
+}
+
+impl CounterExpression {
+ pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+ Self { kind, lhs, rhs }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
new file mode 100644
index 000000000..06844afd6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -0,0 +1,348 @@
+pub use super::ffi::*;
+
+use rustc_index::{IndexSlice, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
+ InjectedExpressionIndex, MappedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Expression {
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (addition or subtraction), and (c) unreachable regions (always counted as zero),
+/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
+/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+#[derive(Debug)]
+pub struct FunctionCoverage<'tcx> {
+ instance: Instance<'tcx>,
+ source_hash: u64,
+ is_used: bool,
+ counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
+ expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+ unreachable_regions: Vec<CodeRegion>,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+ /// Creates a new set of coverage data for a used (called) function.
+ pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, true)
+ }
+
+ /// Creates a new set of coverage data for an unused (never called) function.
+ pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, false)
+ }
+
+ fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
+ let coverageinfo = tcx.coverageinfo(instance.def);
+ debug!(
+ "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
+ instance, coverageinfo, is_used
+ );
+ Self {
+ instance,
+ source_hash: 0, // will be set with the first `add_counter()`
+ is_used,
+ counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+ expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+ unreachable_regions: Vec::new(),
+ }
+ }
+
+ /// Returns true for a used (called) function, and false for an unused function.
+ pub fn is_used(&self) -> bool {
+ self.is_used
+ }
+
+ /// Sets the function source hash value. If called multiple times for the same function, all
+ /// calls should have the same hash value.
+ pub fn set_function_source_hash(&mut self, source_hash: u64) {
+ if self.source_hash == 0 {
+ self.source_hash = source_hash;
+ } else {
+ debug_assert_eq!(source_hash, self.source_hash);
+ }
+ }
+
+ /// Adds a code region to be counted by an injected counter intrinsic.
+ pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
+ if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+ assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ }
+ }
+
+ /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+ /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
+ /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
+ /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
+ /// knowing what the last counter ID will be.
+ ///
+ /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
+ /// struct, its vector index is computed, from the given expression ID, by subtracting from
+ /// `u32::MAX`.
+ ///
+ /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
+ /// expressions, an operand that references an expression also uses its original ID, descending
+    /// from `u32::MAX`. These operands are translated only during code generation, after all
+ /// counters and expressions have been added.
+ pub fn add_counter_expression(
+ &mut self,
+ expression_id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) {
+ debug!(
+ "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
+ expression_id, lhs, op, rhs, region
+ );
+ let expression_index = self.expression_index(u32::from(expression_id));
+ debug_assert!(
+ expression_index.as_usize() < self.expressions.len(),
+ "expression_index {} is out of range for expressions.len() = {}
+ for {:?}",
+ expression_index.as_usize(),
+ self.expressions.len(),
+ self,
+ );
+ if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+ lhs,
+ op,
+ rhs,
+ region: region.clone(),
+ }) {
+ assert_eq!(
+ previous_expression,
+ Expression { lhs, op, rhs, region },
+ "add_counter_expression: expression for id changed"
+ );
+ }
+ }
+
+ /// Add a region that will be marked as "unreachable", with a constant "zero counter".
+ pub fn add_unreachable_region(&mut self, region: CodeRegion) {
+ self.unreachable_regions.push(region)
+ }
+
+ /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+ /// or not the source code structure changed between different compilations.
+ pub fn source_hash(&self) -> u64 {
+ self.source_hash
+ }
+
+ /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
+ /// associated `Regions` (from which the LLVM-specific `CoverageMapGenerator` will create
+    /// `CounterMappingRegion`s).
+ pub fn get_expressions_and_counter_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ assert!(
+ self.source_hash != 0 || !self.is_used,
+ "No counters provided the source_hash for used function: {:?}",
+ self.instance
+ );
+
+ let counter_regions = self.counter_regions();
+ let (counter_expressions, expression_regions) = self.expressions_with_regions();
+ let unreachable_regions = self.unreachable_regions();
+
+ let counter_regions =
+ counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+ (counter_expressions, counter_regions)
+ }
+
+ fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.counters.iter_enumerated().filter_map(|(index, entry)| {
+ // Option::map() will return None to filter out missing counters. This may happen
+ // if, for example, a MIR-instrumented counter is removed during an optimization.
+ entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
+ })
+ }
+
+ fn expressions_with_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ let mut counter_expressions = Vec::with_capacity(self.expressions.len());
+ let mut expression_regions = Vec::with_capacity(self.expressions.len());
+ let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+
+ // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
+ // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
+ // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
+ // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
+ // matches the injected counter index); and any other value is converted into a
+ // `CounterKind::Expression` with the expression's `new_index`.
+ //
+ // Expressions will be returned from this function in a sequential vector (array) of
+ // `CounterExpression`, so the expression IDs must be mapped from their original,
+ // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+ //
+ // An `Expression` as an operand will have already been encountered as an `Expression` with
+ // operands, so its new_index will already have been generated (as a 1-up index value).
+ // (If an `Expression` as an operand does not have a corresponding new_index, it was
+ // probably optimized out, after the expression was injected into the MIR, so it will
+ // get a `CounterKind::Zero` instead.)
+ //
+        // In other words, an `Expression` at any given index can include other expressions as
+ // operands, but expression operands can only come from the subset of expressions having
+ // `expression_index`s lower than the referencing `Expression`. Therefore, it is
+ // reasonable to look up the new index of an expression operand while the `new_indexes`
+ // vector is only complete up to the current `ExpressionIndex`.
+ let id_to_counter = |new_indexes: &IndexSlice<
+ InjectedExpressionIndex,
+ Option<MappedExpressionIndex>,
+ >,
+ id: ExpressionOperandId| {
+ if id == ExpressionOperandId::ZERO {
+ Some(Counter::zero())
+ } else if id.index() < self.counters.len() {
+ debug_assert!(
+ id.index() > 0,
+ "ExpressionOperandId indexes for counters are 1-based, but this id={}",
+ id.index()
+ );
+ // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
+                // and may not have their own `CodeRegion`s.
+ let index = CounterValueReference::from(id.index());
+ // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
+ Some(Counter::counter_value_reference(index))
+ } else {
+ let index = self.expression_index(u32::from(id));
+ self.expressions
+ .get(index)
+ .expect("expression id is out of range")
+ .as_ref()
+ // If an expression was optimized out, assume it would have produced a count
+ // of zero. This ensures that expressions dependent on optimized-out
+ // expressions are still valid.
+ .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
+ }
+ };
+
+ for (original_index, expression) in
+ self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
+ // Option::map() will return None to filter out missing expressions. This may happen
+ // if, for example, a MIR-instrumented expression is removed during an optimization.
+ entry.as_ref().map(|expression| (original_index, expression))
+ })
+ {
+ let optional_region = &expression.region;
+ let Expression { lhs, op, rhs, .. } = *expression;
+
+ if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
+ .map(|lhs_counter| {
+ id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
+ })
+ {
+ if lhs_counter.is_zero() && op.is_subtract() {
+ // The left side of a subtraction was probably optimized out. As an example,
+ // a branch condition might be evaluated as a constant expression, and the
+ // branch could be removed, dropping unused counters in the process.
+ //
+ // Since counters are unsigned, we must assume the result of the expression
+ // can be no more and no less than zero. An expression known to evaluate to zero
+ // does not need to be added to the coverage map.
+ //
+ // Coverage test `loops_branches.rs` includes multiple variations of branches
+ // based on constant conditional (literal `true` or `false`), and demonstrates
+ // that the expected counts are still correct.
+ debug!(
+ "Expression subtracts from zero (assume unreachable): \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index, lhs, op, rhs, optional_region,
+ );
+ rhs_counter = Counter::zero();
+ }
+ debug_assert!(
+ lhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((lhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "lhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ lhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ debug_assert!(
+ rhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((rhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "rhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ rhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ // Both operands exist. `Expression` operands exist in `self.expressions` and have
+ // been assigned a `new_index`.
+ let mapped_expression_index =
+ MappedExpressionIndex::from(counter_expressions.len());
+ let expression = CounterExpression::new(
+ lhs_counter,
+ match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ rhs_counter,
+ );
+ debug!(
+ "Adding expression {:?} = {:?}, region: {:?}",
+ mapped_expression_index, expression, optional_region
+ );
+ counter_expressions.push(expression);
+ new_indexes[original_index] = Some(mapped_expression_index);
+ if let Some(region) = optional_region {
+ expression_regions.push((Counter::expression(mapped_expression_index), region));
+ }
+ } else {
+ bug!(
+ "expression has one or more missing operands \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index,
+ lhs,
+ op,
+ rhs,
+ optional_region,
+ );
+ }
+ }
+ (counter_expressions, expression_regions.into_iter())
+ }
+
+ fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+ }
+
+ fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
+ debug_assert!(id_descending_from_max >= self.counters.len() as u32);
+ InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 21a1ac348..a1ff2aa66 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,10 +1,10 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
+use crate::coverageinfo::map_data::{Counter, CounterExpression};
use crate::llvm;
use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
-use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
+use rustc_codegen_ssa::traits::ConstMethods;
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index cd261293e..42fdbd786 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -3,13 +3,13 @@ use crate::llvm;
use crate::abi::Abi;
use crate::builder::Builder;
use crate::common::CodegenCx;
+use crate::coverageinfo::map_data::{CounterExpression, FunctionCoverage};
use libc::c_uint;
use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
use rustc_codegen_ssa::traits::{
- BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
- MiscMethods, StaticMethods,
+ BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
+ StaticMethods,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
@@ -17,16 +17,20 @@ use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
use rustc_middle::mir::coverage::{
- CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
+ CodeRegion, CounterValueReference, CoverageKind, ExpressionOperandId, InjectedExpressionId, Op,
};
+use rustc_middle::mir::Coverage;
use rustc_middle::ty;
-use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::Instance;
+use rustc_middle::ty::Ty;
use std::cell::RefCell;
use std::ffi::CString;
+mod ffi;
+pub(crate) mod map_data;
pub mod mapgen;
const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
@@ -53,11 +57,17 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
}
}
-impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
- fn coverageinfo_finalize(&self) {
+// These methods used to be part of trait `CoverageInfoMethods`, which no longer
+// exists after most coverage code was moved out of SSA.
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ pub(crate) fn coverageinfo_finalize(&self) {
mapgen::finalize(self)
}
+ /// For LLVM codegen, returns a function-specific `Value` for a global
+ /// string, to hold the function name passed to LLVM intrinsic
+ /// `instrprof.increment()`. The `Value` is only created once per instance.
+ /// Multiple invocations with the same instance return the same `Value`.
fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
if let Some(coverage_context) = self.coverage_context() {
debug!("getting pgo_func_name_var for instance={:?}", instance);
@@ -94,6 +104,54 @@ impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
+ let bx = self;
+
+ let Coverage { kind, code_region } = coverage.clone();
+ match kind {
+ CoverageKind::Counter { function_source_hash, id } => {
+ if bx.set_function_source_hash(instance, function_source_hash) {
+ // If `set_function_source_hash()` returned true, the coverage map is enabled,
+ // so continue adding the counter.
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
+ bx.add_coverage_counter(instance, id, code_region);
+ }
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.zero_based_index());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ }
+ }
+ CoverageKind::Expression { id, lhs, op, rhs } => {
+ bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ }
+ CoverageKind::Unreachable => {
+ bx.add_coverage_unreachable(
+ instance,
+ code_region.expect("unreachable regions always have code regions"),
+ );
+ }
+ }
+ }
+}
+
+// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
+// after moving most coverage code out of SSA they are now just ordinary methods.
+impl<'tcx> Builder<'_, '_, 'tcx> {
+ /// Returns true if the function source hash was added to the coverage map (even if it had
+ /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
+ /// not enabled (a coverage map is not being generated).
fn set_function_source_hash(
&mut self,
instance: Instance<'tcx>,
@@ -115,6 +173,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
}
}
+ /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
fn add_coverage_counter(
&mut self,
instance: Instance<'tcx>,
@@ -137,6 +197,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
}
}
+ /// Returns true if the expression was added to the coverage map; false if
+ /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
fn add_coverage_counter_expression(
&mut self,
instance: Instance<'tcx>,
@@ -163,6 +225,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
}
}
+ /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
if let Some(coverage_context) = self.coverage_context() {
debug!(
@@ -199,8 +263,8 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
tcx.symbol_name(instance).name,
cx.fn_abi_of_fn_ptr(
ty::Binder::dummy(tcx.mk_fn_sig(
- [tcx.mk_unit()],
- tcx.mk_unit(),
+ [Ty::new_unit(tcx)],
+ Ty::new_unit(tcx),
false,
hir::Unsafety::Unsafe,
Abi::Rust,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 3fff112a0..65cbd5edc 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -65,10 +65,10 @@ fn make_mir_scope<'ll, 'tcx>(
debug_context.scopes[parent]
} else {
// The root is the function itself.
- let loc = cx.lookup_debug_loc(mir.span.lo());
+ let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
- file_start_pos: loc.file.start_pos,
- file_end_pos: loc.file.end_pos,
+ file_start_pos: file.start_pos,
+ file_end_pos: file.end_pos,
..debug_context.scopes[scope]
};
instantiated.insert(scope);
@@ -93,7 +93,7 @@ fn make_mir_scope<'ll, 'tcx>(
let callee = cx.tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
- ty::EarlyBinder(callee),
+ ty::EarlyBinder::bind(callee),
);
let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
cx.dbg_scope_fn(callee, callee_fn_abi, None)
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index bd2fba126..d61400d3f 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -168,7 +168,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
// a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
debug_assert_eq!(
cx.size_and_align_of(ptr_type),
- cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+ cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
);
let pointee_type_di_node = type_di_node(cx, pointee_type);
@@ -223,8 +223,11 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
// at all and instead emit regular struct debuginfo for it. We just
// need to make sure that we don't break existing debuginfo consumers
// by doing that (at least not without a warning period).
- let layout_type =
- if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+ let layout_type = if ptr_type.is_box() {
+ Ty::new_mut_ptr(cx.tcx, pointee_type)
+ } else {
+ ptr_type
+ };
let layout = cx.layout_of(layout_type);
let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
@@ -430,7 +433,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
return existing_di_node;
}
- debug!("type_di_node: {:?}", t);
+ debug!("type_di_node: {:?} kind: {:?}", t, t.kind());
let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
@@ -1034,7 +1037,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
build_field_di_node(
cx,
closure_or_generator_di_node,
- capture_name,
+ capture_name.as_str(),
cx.size_and_align_of(up_var_ty),
layout.fields.offset(index),
DIFlags::FlagZero,
@@ -1298,7 +1301,7 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
// All function pointers are described as opaque pointers. This could be improved in the future
// by describing them as actual function pointers.
- let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
+ let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit);
let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
let usize_di_node = type_di_node(cx, tcx.types.usize);
let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
@@ -1388,7 +1391,7 @@ fn vcall_visibility_metadata<'ll, 'tcx>(
let trait_def_id = trait_ref_self.def_id();
let trait_vis = cx.tcx.visibility(trait_def_id);
- let cgus = cx.sess().codegen_units();
+ let cgus = cx.sess().codegen_units().as_usize();
let single_cgu = cgus == 1;
let lto = cx.sess().lto();
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index ecb0912d3..b2765ffc9 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -676,8 +676,7 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
_ => unreachable!(),
};
- let (generator_layout, state_specific_upvar_names) =
- cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
+ let generator_layout = cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(generator_def_id);
let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
@@ -714,7 +713,6 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
generator_type_and_layout,
generator_type_di_node,
generator_layout,
- &state_specific_upvar_names,
&common_upvar_names,
);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 9e0e847a1..8746ce0c5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -6,7 +6,7 @@ use rustc_hir::def::CtorKind;
use rustc_index::IndexSlice;
use rustc_middle::{
bug,
- mir::{GeneratorLayout, GeneratorSavedLocal},
+ mir::GeneratorLayout,
ty::{
self,
layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
@@ -323,8 +323,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
generator_type_and_layout: TyAndLayout<'tcx>,
generator_type_di_node: &'ll DIType,
generator_layout: &GeneratorLayout<'tcx>,
- state_specific_upvar_names: &IndexSlice<GeneratorSavedLocal, Option<Symbol>>,
- common_upvar_names: &[String],
+ common_upvar_names: &IndexSlice<FieldIdx, Symbol>,
) -> &'ll DIType {
let variant_name = GeneratorSubsts::variant_name(variant_index);
let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
@@ -357,7 +356,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
.map(|field_index| {
let generator_saved_local = generator_layout.variant_fields[variant_index]
[FieldIdx::from_usize(field_index)];
- let field_name_maybe = state_specific_upvar_names[generator_saved_local];
+ let field_name_maybe = generator_layout.field_names[generator_saved_local];
let field_name = field_name_maybe
.as_ref()
.map(|s| Cow::from(s.as_str()))
@@ -380,12 +379,13 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
// Fields that are common to all states
let common_fields: SmallVec<_> = generator_substs
.prefix_tys()
+ .zip(common_upvar_names)
.enumerate()
- .map(|(index, upvar_ty)| {
+ .map(|(index, (upvar_ty, upvar_name))| {
build_field_di_node(
cx,
variant_struct_type_di_node,
- &common_upvar_names[index],
+ upvar_name.as_str(),
cx.size_and_align_of(upvar_ty),
generator_type_and_layout.fields.offset(index),
DIFlags::FlagZero,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 978141917..4d1cd6486 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -155,8 +155,8 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
DIFlags::FlagZero,
),
|cx, generator_type_di_node| {
- let (generator_layout, state_specific_upvar_names) =
- cx.tcx.generator_layout_and_saved_local_names(generator_def_id);
+ let generator_layout =
+ cx.tcx.optimized_mir(generator_def_id).generator_layout().unwrap();
let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
bug!(
@@ -195,7 +195,6 @@ pub(super) fn build_generator_di_node<'ll, 'tcx>(
generator_type_and_layout,
generator_type_di_node,
generator_layout,
- &state_specific_upvar_names,
&common_upvar_names,
),
source_info,
@@ -412,13 +411,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
enum_type_and_layout.size.bits(),
enum_type_and_layout.align.abi.bits() as u32,
Size::ZERO.bits(),
- discr_value.opt_single_val().map(|value| {
- // NOTE(eddyb) do *NOT* remove this assert, until
- // we pass the full 128-bit value to LLVM, otherwise
- // truncation will be silent and remain undetected.
- assert_eq!(value as u64 as u128, value);
- cx.const_u64(value as u64)
- }),
+ discr_value.opt_single_val().map(|value| cx.const_u128(value)),
DIFlags::FlagZero,
variant_member_info.variant_struct_type_di_node,
)
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index c3f0a0033..b924c771a 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -263,7 +263,7 @@ impl CodegenCx<'_, '_> {
pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.line_begin_pos(pos);
+ let line_pos = file.lines(|lines| lines[line]);
// Use 1-based indexing.
let line = (line + 1) as u32;
@@ -332,7 +332,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
};
- let mut name = String::new();
+ let mut name = String::with_capacity(64);
type_names::push_item_name(tcx, def_id, false, &mut name);
// Find the enclosing function, in case this is a closure.
@@ -454,7 +454,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
ty::Array(ct, _)
if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
{
- cx.tcx.mk_imm_ptr(*ct)
+ Ty::new_imm_ptr(cx.tcx, *ct)
}
_ => t,
};
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
index d5ea48c31..fa61c7dde 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -28,7 +28,7 @@ pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DISco
.map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
let namespace_name_string = {
- let mut output = String::new();
+ let mut output = String::with_capacity(64);
type_names::push_item_name(cx.tcx, def_id, false, &mut output);
output
};
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 6bcd3e5bf..7be836386 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -82,8 +82,8 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
ty::Foreign(_) => {
// Assert that pointers to foreign types really are thin:
debug_assert_eq!(
- cx.size_of(cx.tcx.mk_imm_ptr(pointee_tail_ty)),
- cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8))
+ cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
+ cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
);
None
}
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index 6a9173ab4..44869ced1 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -50,9 +50,15 @@ pub(crate) struct SymbolAlreadyDefined<'a> {
}
#[derive(Diagnostic)]
-#[diag(codegen_llvm_invalid_minimum_alignment)]
-pub(crate) struct InvalidMinimumAlignment {
- pub err: String,
+#[diag(codegen_llvm_invalid_minimum_alignment_not_power_of_two)]
+pub(crate) struct InvalidMinimumAlignmentNotPowerOfTwo {
+ pub align: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_invalid_minimum_alignment_too_large)]
+pub(crate) struct InvalidMinimumAlignmentTooLarge {
+ pub align: u64,
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 4e28034a8..a254c86c2 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;
-use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
use rustc_codegen_ssa::mir::operand::OperandRef;
@@ -452,6 +452,8 @@ fn try_intrinsic<'ll>(
bx.store(bx.const_i32(0), dest, ret_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, try_func, data, catch_func, dest);
+ } else if wants_wasm_eh(bx.sess()) {
+ codegen_wasm_try(bx, try_func, data, catch_func, dest);
} else if bx.sess().target.os == "emscripten" {
codegen_emcc_try(bx, try_func, data, catch_func, dest);
} else {
@@ -610,6 +612,80 @@ fn codegen_msvc_try<'ll>(
bx.store(ret, dest, i32_align);
}
+// WASM's definition of the `rust_try` function.
+fn codegen_wasm_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ bx.set_personality_fn(bx.eh_personality());
+
+ let normal = bx.append_sibling_block("normal");
+ let catchswitch = bx.append_sibling_block("catchswitch");
+ let catchpad = bx.append_sibling_block("catchpad");
+ let caught = bx.append_sibling_block("caught");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+
+ // We're generating an IR snippet that looks like:
+ //
+ // declare i32 @rust_try(%try_func, %data, %catch_func) {
+ // %slot = alloca i8*
+ // invoke %try_func(%data) to label %normal unwind label %catchswitch
+ //
+ // normal:
+ // ret i32 0
+ //
+ // catchswitch:
+ // %cs = catchswitch within none [%catchpad] unwind to caller
+ //
+ // catchpad:
+ // %tok = catchpad within %cs [null]
+ // %ptr = call @llvm.wasm.get.exception(token %tok)
+ // %sel = call @llvm.wasm.get.ehselector(token %tok)
+ // call %catch_func(%data, %ptr)
+ // catchret from %tok to label %caught
+ //
+ // caught:
+ // ret i32 1
+ // }
+ //
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+ bx.switch_to_block(normal);
+ bx.ret(bx.const_i32(0));
+
+ bx.switch_to_block(catchswitch);
+ let cs = bx.catch_switch(None, None, &[catchpad]);
+
+ bx.switch_to_block(catchpad);
+ let null = bx.const_null(bx.type_i8p());
+ let funclet = bx.catch_pad(cs, &[null]);
+
+ let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
+ let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
+
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+ bx.catch_ret(&funclet, caught);
+
+ bx.switch_to_block(caught);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
@@ -797,23 +873,29 @@ fn get_rust_try_fn<'ll, 'tcx>(
// Define the type up front for the signature of the rust_try function.
let tcx = cx.tcx;
- let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+ let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
// `unsafe fn(*mut i8) -> ()`
- let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
- [i8p],
- tcx.mk_unit(),
- false,
- hir::Unsafety::Unsafe,
- Abi::Rust,
- )));
+ let try_fn_ty = Ty::new_fn_ptr(
+ tcx,
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ [i8p],
+ Ty::new_unit(tcx),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )),
+ );
// `unsafe fn(*mut i8, *mut i8) -> ()`
- let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
- [i8p, i8p],
- tcx.mk_unit(),
- false,
- hir::Unsafety::Unsafe,
- Abi::Rust,
- )));
+ let catch_fn_ty = Ty::new_fn_ptr(
+ tcx,
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ [i8p, i8p],
+ Ty::new_unit(tcx),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )),
+ );
// `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
[try_fn_ty, i8p, catch_fn_ty],
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 805843e58..24ba28bbc 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -33,7 +33,7 @@ use rustc_codegen_ssa::back::write::{
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_metadata::EncodedMetadata;
@@ -355,7 +355,7 @@ impl CodegenBackend for LlvmCodegenBackend {
ongoing_codegen: Box<dyn Any>,
sess: &Session,
outputs: &OutputFilenames,
- ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
let (codegen_results, work_products) = ongoing_codegen
.downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index de93a64c0..3ad546b61 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,7 +1,7 @@
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
-use rustc_codegen_ssa::coverageinfo::map as coverage_map;
+use crate::coverageinfo::map_data as coverage_map;
use super::debuginfo::{
DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
@@ -196,6 +196,7 @@ pub enum AttributeKind {
AllocSize = 37,
AllocatedPointer = 38,
AllocAlign = 39,
+ SanitizeSafeStack = 40,
}
/// LLVMIntPredicate
@@ -584,6 +585,16 @@ pub enum ThreadLocalMode {
LocalExec,
}
+/// LLVMRustTailCallKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum TailCallKind {
+ None,
+ Tail,
+ MustTail,
+ NoTail,
+}
+
/// LLVMRustChecksumKind
#[derive(Copy, Clone)]
#[repr(C)]
@@ -1070,6 +1081,7 @@ extern "C" {
// Operations on other types
pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMTokenTypeInContext(C: &Context) -> &Type;
pub fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
// Operations on all values
@@ -1194,6 +1206,7 @@ extern "C" {
NameLen: size_t,
) -> Option<&Value>;
pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+ pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind);
// Operations on attributes
pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
@@ -1300,7 +1313,7 @@ extern "C" {
NumArgs: c_uint,
Then: &'a BasicBlock,
Catch: &'a BasicBlock,
- OpBundles: *const Option<&OperandBundleDef<'a>>,
+ OpBundles: *const &OperandBundleDef<'a>,
NumOpBundles: c_uint,
Name: *const c_char,
) -> &'a Value;
@@ -1672,7 +1685,7 @@ extern "C" {
Fn: &'a Value,
Args: *const &'a Value,
NumArgs: c_uint,
- OpBundles: *const Option<&OperandBundleDef<'a>>,
+ OpBundles: *const &OperandBundleDef<'a>,
NumOpBundles: c_uint,
) -> &'a Value;
pub fn LLVMRustBuildMemCpy<'a>(
@@ -2511,6 +2524,7 @@ extern "C" {
remark_all_passes: bool,
remark_passes: *const *const c_char,
remark_passes_len: usize,
+ remark_file: *const c_char,
);
#[allow(improper_ctypes)]
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index d3fad5699..7e672a8dc 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -52,6 +52,10 @@ impl<'ll> CodegenCx<'ll, '_> {
unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
}
+ pub(crate) fn type_token(&self) -> &'ll Type {
+ unsafe { llvm::LLVMTokenTypeInContext(self.llcx) }
+ }
+
pub(crate) fn type_metadata(&self) -> &'ll Type {
unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) }
}
@@ -288,6 +292,9 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
ty.llvm_type(self)
}
+ fn scalar_copy_backend_type(&self, layout: TyAndLayout<'tcx>) -> Option<Self::Type> {
+ layout.scalar_copy_llvm_type(self)
+ }
}
impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index e264ce78f..7f7da8483 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -6,6 +6,7 @@ use rustc_middle::bug;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+use rustc_target::abi::HasDataLayout;
use rustc_target::abi::{Abi, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
@@ -192,14 +193,14 @@ pub trait LayoutLlvmExt<'tcx> {
) -> &'a Type;
fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+ fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
}
impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::Vector { .. } => true,
- Abi::ScalarPair(..) => false,
- Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
}
}
@@ -336,12 +337,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
// only wide pointer boxes are handled as pointers
// thin pointer boxes with scalar allocators are handled by the general logic below
ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
- let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
// `dyn* Trait` has the same ABI as `*mut dyn Trait`
ty::Dynamic(bounds, region, ty::DynStar) => {
- let ptr_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_dynamic(bounds, region, ty::Dyn));
+ let ptr_ty =
+ Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
_ => {}
@@ -415,4 +417,39 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
result
}
+
+ fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
+ debug_assert!(self.is_sized());
+
+ // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
+ // (matching the 128-bit SIMD types proposal) and 256 bits on x64
+ // (like AVX2 registers) seems at least like a tolerable starting point.
+ let threshold = cx.data_layout().pointer_size * 4;
+ if self.layout.size() > threshold {
+ return None;
+ }
+
+ // Vectors, even for non-power-of-two sizes, have the same layout as
+ // arrays but don't count as aggregate types
+ // While LLVM theoretically supports non-power-of-two sizes, and they
+ // often work fine, sometimes x86-isel deals with them horribly
+ // (see #115212) so for now only use power-of-two ones.
+ if let FieldsShape::Array { count, .. } = self.layout.fields()
+ && count.is_power_of_two()
+ && let element = self.field(cx, 0)
+ && element.ty.is_integral()
+ {
+ // `cx.type_ix(bits)` is tempting here, but while that works great
+ // for things that *stay* as memory-to-memory copies, it also ends
+ // up suppressing vectorization as it introduces shifts when it
+ // extracts all the individual values.
+
+ let ety = element.llvm_type(cx);
+ return Some(cx.type_vector(ety, *count));
+ }
+
+ // FIXME: The above only handled integer arrays; surely more things
+ // would also be possible. Be careful about provenance, though!
+ None
+ }
}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index b19398e68..8800caa71 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -73,7 +73,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
let layout = bx.cx.layout_of(target_ty);
let (llty, size, align) = if indirect {
(
- bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+ bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
bx.cx.data_layout().pointer_size,
bx.cx.data_layout().pointer_align,
)