Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 13
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 42
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 65
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 104
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 14
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 28
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 42
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 50
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 3
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 134
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 19
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 20
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 28
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 33
22 files changed, 369 insertions(+), 261 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 52c8b5179..d9f8170a3 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -849,6 +849,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+ let dl = &cx.tcx.data_layout;
match scalar.primitive() {
Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -856,7 +857,8 @@ fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Ty
Primitive::Int(Integer::I64, _) => cx.type_i64(),
Primitive::F32 => cx.type_f32(),
Primitive::F64 => cx.type_f64(),
- Primitive::Pointer => cx.type_isize(),
+ // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+ Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
_ => unreachable!(),
}
}
@@ -868,6 +870,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
+ let dl = &bx.tcx.data_layout;
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
@@ -881,8 +884,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
let count = 16 / layout.size.bytes();
let vec_ty = bx.cx.type_vector(elem_ty, count);
- if let Primitive::Pointer = s.primitive() {
- value = bx.ptrtoint(value, bx.cx.type_isize());
+ // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+ if let Primitive::Pointer(_) = s.primitive() {
+ let t = bx.type_from_integer(dl.ptr_sized_integer());
+ value = bx.ptrtoint(value, t);
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
}
@@ -958,7 +963,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
value = bx.extract_element(value, bx.const_i32(0));
- if let Primitive::Pointer = s.primitive() {
+ if let Primitive::Pointer(_) = s.primitive() {
value = bx.inttoptr(value, layout.llvm_type(bx.cx));
}
value
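// Illustrative sketch (not part of this patch): a simplified model of why the asm
// lowering now asks the data layout for a pointer-sized integer instead of using
// `type_isize()`. All types below are stand-ins for rustc's `Primitive`,
// `AddressSpace`, and data-layout types.

#[derive(Clone, Copy)]
struct AddressSpace(u32);

#[derive(Clone, Copy)]
enum Primitive {
    Int(u8), // bit width
    F32,
    F64,
    Pointer(AddressSpace), // the primitive now carries its address space
}

struct DataLayout {
    pointer_size_bits: u8,
}

impl DataLayout {
    // Analogue of `dl.ptr_sized_integer()`: the integer width matching a pointer
    // in the default address space.
    fn ptr_sized_integer(&self) -> u8 {
        self.pointer_size_bits
    }
}

// Analogue of `llvm_asm_scalar_type`: pointers are lowered to the pointer-sized
// integer reported by the data layout rather than a hardcoded `isize`.
fn scalar_bit_width(dl: &DataLayout, p: Primitive) -> u8 {
    match p {
        Primitive::Int(w) => w,
        Primitive::F32 => 32,
        Primitive::F64 => 64,
        // Like the patch, this ignores that non-default address spaces may have
        // a different pointer size (the FIXME above).
        Primitive::Pointer(_) => dl.ptr_sized_integer(),
    }
}

fn main() {
    let dl = DataLayout { pointer_size_bits: 64 };
    assert_eq!(scalar_bit_width(&dl, Primitive::Pointer(AddressSpace(0))), 64);
}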
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 95baa95b0..651d644eb 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -62,7 +62,7 @@ pub fn sanitize_attrs<'ll>(
) -> SmallVec<[&'ll Attribute; 4]> {
let mut attrs = SmallVec::new();
let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
- if enabled.contains(SanitizerSet::ADDRESS) {
+ if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
}
if enabled.contains(SanitizerSet::MEMORY) {
@@ -118,7 +118,8 @@ pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attr
/// Tell LLVM what instrument function to insert.
#[inline]
-fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
+ let mut attrs = SmallVec::new();
if cx.sess().opts.unstable_opts.instrument_mcount {
// Similar to `clang -pg` behavior. Handled by the
// `post-inline-ee-instrument` LLVM pass.
@@ -127,14 +128,41 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribu
// See test/CodeGen/mcount.c in clang.
let mcount_name = cx.sess().target.mcount.as_ref();
- Some(llvm::CreateAttrStringValue(
+ attrs.push(llvm::CreateAttrStringValue(
cx.llcx,
"instrument-function-entry-inlined",
&mcount_name,
- ))
- } else {
- None
+ ));
+ }
+ if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
+ // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
+ // Function prologue and epilogue are instrumented with NOP sleds,
+ // a runtime library later replaces them with detours into tracing code.
+ if options.always {
+ attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
+ }
+ if options.never {
+ attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
+ }
+ if options.ignore_loops {
+ attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
+ }
+ // LLVM will not choose the default for us, but rather requires specific
+ // threshold in absence of "xray-always". Use the same default as Clang.
+ let threshold = options.instruction_threshold.unwrap_or(200);
+ attrs.push(llvm::CreateAttrStringValue(
+ cx.llcx,
+ "xray-instruction-threshold",
+ &threshold.to_string(),
+ ));
+ if options.skip_entry {
+ attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
+ }
+ if options.skip_exit {
+ attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
+ }
}
+ attrs
}
fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
@@ -441,7 +469,7 @@ pub fn from_fn_attrs<'ll, 'tcx>(
// the WebAssembly specification, which has this feature. This won't be
// needed when LLVM enables this `multivalue` feature by default.
if !cx.tcx.is_closure(instance.def_id()) {
- let abi = cx.tcx.fn_sig(instance.def_id()).abi();
+ let abi = cx.tcx.fn_sig(instance.def_id()).skip_binder().abi();
if abi == Abi::Wasm {
function_features.push("+multivalue".to_string());
}
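// Illustrative sketch (not part of this patch): the shape of the new
// `instrument_function_attr`, which returns a collection of attributes instead of
// an `Option` so that mcount and XRay instrumentation can be combined. Attributes
// are modeled as plain (key, value) strings and `XRayOptions` is a stand-in for
// the `-Z instrument-xray` options struct.

#[derive(Default)]
struct XRayOptions {
    always: bool,
    never: bool,
    ignore_loops: bool,
    instruction_threshold: Option<u32>,
    skip_entry: bool,
    skip_exit: bool,
}

fn xray_attrs(options: &XRayOptions) -> Vec<(String, String)> {
    let mut attrs = Vec::new();
    if options.always {
        attrs.push(("function-instrument".into(), "xray-always".into()));
    }
    if options.never {
        attrs.push(("function-instrument".into(), "xray-never".into()));
    }
    if options.ignore_loops {
        attrs.push(("xray-ignore-loops".into(), String::new()));
    }
    // LLVM requires an explicit threshold when "xray-always" is absent;
    // 200 matches Clang's default.
    let threshold = options.instruction_threshold.unwrap_or(200);
    attrs.push(("xray-instruction-threshold".into(), threshold.to_string()));
    if options.skip_entry {
        attrs.push(("xray-skip-entry".into(), String::new()));
    }
    if options.skip_exit {
        attrs.push(("xray-skip-exit".into(), String::new()));
    }
    attrs
}

fn main() {
    let attrs = xray_attrs(&XRayOptions { always: true, ..Default::default() });
    assert!(attrs.iter().any(|(k, _)| k == "xray-instruction-threshold"));
}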
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 58ca87524..dd3268d77 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -183,6 +183,12 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
// able to control the *exact* spelling of each of the symbols that are being imported:
// hence we don't want `dlltool` adding leading underscores automatically.
let dlltool = find_binutils_dlltool(sess);
+ let temp_prefix = {
+ let mut path = PathBuf::from(&output_path);
+ path.pop();
+ path.push(lib_name);
+ path
+ };
let result = std::process::Command::new(dlltool)
.args([
"-d",
@@ -192,6 +198,8 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
"-l",
output_path.to_str().unwrap(),
"--no-leading-underscore",
+ "--temp-prefix",
+ temp_prefix.to_str().unwrap(),
])
.output();
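// Illustrative sketch (not part of this patch): how the new `--temp-prefix`
// argument is derived. The output path's file name is replaced with the library
// name so that dlltool writes its temporaries next to the import library instead
// of into the current working directory. Paths here are hypothetical.
use std::path::{Path, PathBuf};

fn dlltool_temp_prefix(output_path: &Path, lib_name: &str) -> PathBuf {
    let mut path = PathBuf::from(output_path);
    path.pop(); // drop the import library's file name
    path.push(lib_name); // prefix temporaries with the library name
    path
}

fn main() {
    let prefix = dlltool_temp_prefix(Path::new("target/debug/deps/foo.dll.lib"), "foo");
    assert_eq!(prefix, PathBuf::from("target/debug/deps/foo"));
}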
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 6c0faf37a..d2e01708a 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,5 +1,7 @@
use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
-use crate::errors::DynamicLinkingWithLTO;
+use crate::errors::{
+ DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+};
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm};
use object::read::archive::ArchiveFile;
@@ -77,15 +79,12 @@ fn prepare_lto(
// Make sure we actually can run LTO
for crate_type in cgcx.crate_types.iter() {
if !crate_type_allows_lto(*crate_type) {
- let e = diag_handler.fatal(
- "lto can only be run for executables, cdylibs and \
- static library outputs",
- );
- return Err(e);
+ diag_handler.emit_err(LtoDisallowed);
+ return Err(FatalError);
} else if *crate_type == CrateType::Dylib {
if !cgcx.opts.unstable_opts.dylib_lto {
- return Err(diag_handler
- .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`"));
+ diag_handler.emit_err(LtoDylib);
+ return Err(FatalError);
}
}
}
@@ -127,7 +126,10 @@ fn prepare_lto(
let module = SerializedModule::FromRlib(data.to_vec());
upstream_modules.push((module, CString::new(name).unwrap()));
}
- Err(msg) => return Err(diag_handler.fatal(&msg)),
+ Err(e) => {
+ diag_handler.emit_err(e);
+ return Err(FatalError);
+ }
}
}
}
@@ -140,7 +142,7 @@ fn prepare_lto(
Ok((symbols_below_threshold, upstream_modules))
}
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
let mut len = 0;
let data =
unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
@@ -155,8 +157,9 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
Ok(bc)
} else {
assert!(len == 0);
- let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
- Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+ Err(LtoBitcodeFromRlib {
+ llvm_err: llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string()),
+ })
}
}
@@ -328,10 +331,9 @@ fn fat_lto(
});
info!("linking {:?}", name);
let data = bc_decoded.data();
- linker.add(data).map_err(|()| {
- let msg = format!("failed to load bitcode of module {:?}", name);
- write::llvm_err(diag_handler, &msg)
- })?;
+ linker
+ .add(data)
+ .map_err(|()| write::llvm_err(diag_handler, LlvmError::LoadBitcode { name }))?;
serialized_bitcode.push(bc_decoded);
}
drop(linker);
@@ -489,7 +491,7 @@ fn thin_lto(
symbols_below_threshold.as_ptr(),
symbols_below_threshold.len() as u32,
)
- .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;
+ .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::PrepareThinLtoContext))?;
let data = ThinData(data);
@@ -562,8 +564,7 @@ fn thin_lto(
// session, overwriting the previous serialized data (if any).
if let Some(path) = key_map_path {
if let Err(err) = curr_key_map.save_to_file(&path) {
- let msg = format!("Error while writing ThinLTO key data: {}", err);
- return Err(write::llvm_err(diag_handler, &msg));
+ return Err(write::llvm_err(diag_handler, LlvmError::WriteThinLtoKey { err }));
}
}
@@ -689,8 +690,7 @@ pub unsafe fn optimize_thin_module(
let module_name = &thin_module.shared.module_names[thin_module.idx];
let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
- let tm =
- (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
+ let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, e))?;
// Right now the implementation we've got only works over serialized
// modules, so we create a fresh new LLVM context and parse the module
@@ -717,8 +717,7 @@ pub unsafe fn optimize_thin_module(
let mut cu2 = ptr::null_mut();
llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
if !cu2.is_null() {
- let msg = "multiple source DICompileUnits found";
- return Err(write::llvm_err(&diag_handler, msg));
+ return Err(write::llvm_err(&diag_handler, LlvmError::MultipleSourceDiCompileUnit));
}
// Up next comes the per-module local analyses that we do for Thin LTO.
@@ -733,8 +732,7 @@ pub unsafe fn optimize_thin_module(
let _timer =
cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
- let msg = "failed to prepare thin LTO module";
- return Err(write::llvm_err(&diag_handler, msg));
+ return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
}
@@ -744,8 +742,7 @@ pub unsafe fn optimize_thin_module(
.prof
.generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
- let msg = "failed to prepare thin LTO module";
- return Err(write::llvm_err(&diag_handler, msg));
+ return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
}
@@ -755,8 +752,7 @@ pub unsafe fn optimize_thin_module(
.prof
.generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
- let msg = "failed to prepare thin LTO module";
- return Err(write::llvm_err(&diag_handler, msg));
+ return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
}
@@ -765,8 +761,7 @@ pub unsafe fn optimize_thin_module(
let _timer =
cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
- let msg = "failed to prepare thin LTO module";
- return Err(write::llvm_err(&diag_handler, msg));
+ return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule));
}
save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
}
@@ -886,11 +881,7 @@ pub fn parse_module<'a>(
diag_handler: &Handler,
) -> Result<&'a llvm::Module, FatalError> {
unsafe {
- llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
- || {
- let msg = "failed to parse bitcode for LTO module";
- write::llvm_err(diag_handler, msg)
- },
- )
+ llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
+ .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::ParseBitcode))
}
}
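// Illustrative sketch (not part of this patch): the error-handling refactor used
// throughout this file. String-formatted `fatal(...)` calls become typed error
// values that a diagnostic handler renders (and can translate), while control
// flow is carried by a separate `FatalError` token. `Handler` and `LtoError` are
// stand-ins; the message strings are the ones removed by the patch.

struct FatalError;

enum LtoError {
    Disallowed,
    DylibWithoutFlag,
    BitcodeFromRlib { llvm_err: String },
}

struct Handler;

impl Handler {
    fn emit_err(&self, err: LtoError) {
        // The real handler renders a Fluent message; this sketch just prints.
        match err {
            LtoError::Disallowed => eprintln!(
                "lto can only be run for executables, cdylibs and static library outputs"
            ),
            LtoError::DylibWithoutFlag => {
                eprintln!("lto cannot be used for `dylib` crate type without `-Zdylib-lto`")
            }
            LtoError::BitcodeFromRlib { llvm_err } => {
                eprintln!("failed to get bitcode from object file for LTO ({llvm_err})")
            }
        }
    }
}

fn prepare(handler: &Handler, crate_type_allows_lto: bool) -> Result<(), FatalError> {
    if !crate_type_allows_lto {
        // Emit the structured diagnostic, then signal fatality separately.
        handler.emit_err(LtoError::Disallowed);
        return Err(FatalError);
    }
    Ok(())
}

fn main() {
    let _ = prepare(&Handler, false);
}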
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index b2af9f31e..a4ae1b01e 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -5,6 +5,9 @@ use crate::back::profiling::{
use crate::base;
use crate::common;
use crate::consts;
+use crate::errors::{
+ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+};
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm_util;
use crate::type_::Type;
@@ -37,10 +40,10 @@ use std::slice;
use std::str;
use std::sync::Arc;
-pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+pub fn llvm_err<'a>(handler: &rustc_errors::Handler, err: LlvmError<'a>) -> FatalError {
match llvm::last_error() {
- Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
- None => handler.fatal(msg),
+ Some(llvm_err) => handler.emit_almost_fatal(WithLlvmError(err, llvm_err)),
+ None => handler.emit_almost_fatal(err),
}
}
@@ -85,10 +88,9 @@ pub fn write_output_file<'ll>(
}
}
- result.into_result().map_err(|()| {
- let msg = format!("could not write output to {}", output.display());
- llvm_err(handler, &msg)
- })
+ result
+ .into_result()
+ .map_err(|()| llvm_err(handler, LlvmError::WriteOutput { path: output }))
}
}
@@ -98,7 +100,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
// system/tcx is set up.
let features = llvm_util::global_llvm_features(sess, false);
target_machine_factory(sess, config::OptLevel::No, &features)(config)
- .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+ .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
}
pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
@@ -117,7 +119,7 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
tcx.backend_optimization_level(()),
tcx.global_backend_features(()),
)(config)
- .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+ .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), err).raise())
}
pub fn to_llvm_opt_settings(
@@ -240,9 +242,7 @@ pub fn target_machine_factory(
)
};
- tm.ok_or_else(|| {
- format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
- })
+ tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
})
}
@@ -355,25 +355,28 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
};
if enabled {
- diag_handler.note_without_error(&format!(
- "{}:{}:{}: {}: {}",
- opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
- ));
+ diag_handler.emit_note(FromLlvmOptimizationDiag {
+ filename: &opt.filename,
+ line: opt.line,
+ column: opt.column,
+ pass_name: &opt.pass_name,
+ message: &opt.message,
+ });
}
}
llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
- let msg = llvm::build_string(|s| {
+ let message = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
})
.expect("non-UTF8 diagnostic");
- diag_handler.warn(&msg);
+ diag_handler.emit_warning(FromLlvmDiag { message });
}
llvm::diagnostic::Unsupported(diagnostic_ref) => {
- let msg = llvm::build_string(|s| {
+ let message = llvm::build_string(|s| {
llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
})
.expect("non-UTF8 diagnostic");
- diag_handler.err(&msg);
+ diag_handler.emit_err(FromLlvmDiag { message });
}
llvm::diagnostic::UnknownDiagnostic(..) => {}
}
@@ -409,11 +412,7 @@ fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
}
fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
- if config.instrument_coverage {
- Some(CString::new("default_%m_%p.profraw").unwrap())
- } else {
- None
- }
+ config.instrument_coverage.then(|| CString::new("default_%m_%p.profraw").unwrap())
}
pub(crate) unsafe fn llvm_optimize(
@@ -443,16 +442,19 @@ pub(crate) unsafe fn llvm_optimize(
sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+ sanitize_kernel_address: config.sanitizer.contains(SanitizerSet::KERNELADDRESS),
+ sanitize_kernel_address_recover: config
+ .sanitizer_recover
+ .contains(SanitizerSet::KERNELADDRESS),
})
} else {
None
};
- let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() {
- Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()))
- } else {
- None
- };
+ let mut llvm_profiler = cgcx
+ .prof
+ .llvm_recording_enabled()
+ .then(|| LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()));
let llvm_selfprofiler =
llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
@@ -494,7 +496,7 @@ pub(crate) unsafe fn llvm_optimize(
llvm_plugins.as_ptr().cast(),
llvm_plugins.len(),
);
- result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
+ result.into_result().map_err(|()| llvm_err(diag_handler, LlvmError::RunLlvmPasses))
}
// Unsafe due to LLVM calls.
@@ -547,8 +549,7 @@ pub(crate) fn link(
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
let buffer = ModuleBuffer::new(module.module_llvm.llmod());
linker.add(buffer.data()).map_err(|()| {
- let msg = format!("failed to serialize module {:?}", module.name);
- llvm_err(diag_handler, &msg)
+ llvm_err(diag_handler, LlvmError::SerializeModule { name: &module.name })
})?;
}
drop(linker);
@@ -626,9 +627,8 @@ pub(crate) unsafe fn codegen(
let _timer = cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
- if let Err(e) = fs::write(&bc_out, data) {
- let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
- diag_handler.err(&msg);
+ if let Err(err) = fs::write(&bc_out, data) {
+ diag_handler.emit_err(WriteBytecode { path: &bc_out, err });
}
}
@@ -678,10 +678,9 @@ pub(crate) unsafe fn codegen(
record_artifact_size(&cgcx.prof, "llvm_ir", &out);
}
- result.into_result().map_err(|()| {
- let msg = format!("failed to write LLVM IR to {}", out.display());
- llvm_err(diag_handler, &msg)
- })?;
+ result
+ .into_result()
+ .map_err(|()| llvm_err(diag_handler, LlvmError::WriteIr { path: &out }))?;
}
if config.emit_asm {
@@ -749,8 +748,8 @@ pub(crate) unsafe fn codegen(
EmitObj::Bitcode => {
debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
- if let Err(e) = link_or_copy(&bc_out, &obj_out) {
- diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+ if let Err(err) = link_or_copy(&bc_out, &obj_out) {
+ diag_handler.emit_err(CopyBitcode { err });
}
if !config.emit_bc {
@@ -762,6 +761,7 @@ pub(crate) unsafe fn codegen(
EmitObj::None => {}
}
+ record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
drop(handlers);
}
@@ -975,3 +975,23 @@ fn record_artifact_size(
self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
}
}
+
+fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Module) {
+ if !prof.enabled() {
+ return;
+ }
+
+ let raw_stats =
+ llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(&llmod, s) })
+ .expect("cannot get module instruction stats");
+
+ #[derive(serde::Deserialize)]
+ struct InstructionsStats {
+ module: String,
+ total: u64,
+ }
+
+ let InstructionsStats { module, total } =
+ serde_json::from_str(&raw_stats).expect("cannot parse llvm cgu instructions stats");
+ prof.artifact_size("cgu_instructions", module, total);
+}
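// Illustrative sketch (not part of this patch): how the instruction-stats JSON
// produced by the new `LLVMRustModuleInstructionStats` binding is consumed. This
// assumes the `serde` (with `derive`) and `serde_json` crates; the JSON string is
// a made-up stand-in for what the LLVM wrapper emits.
use serde::Deserialize;

#[derive(Deserialize)]
struct InstructionsStats {
    module: String,
    total: u64,
}

fn main() {
    let raw_stats = r#"{"module":"example_cgu","total":12345}"#;
    let InstructionsStats { module, total } =
        serde_json::from_str(raw_stats).expect("cannot parse llvm cgu instructions stats");
    // The real code records this via `prof.artifact_size("cgu_instructions", module, total)`.
    println!("{module}: {total} LLVM instructions");
}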
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 5e98deae4..0f33b9854 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -511,7 +511,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bx.range_metadata(load, scalar.valid_range(bx));
}
}
- abi::Pointer => {
+ abi::Pointer(_) => {
if !scalar.valid_range(bx).contains(0) {
bx.nonnull_metadata(load);
}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index f1d01a460..6ee2a05ff 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -13,7 +13,7 @@ use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
/// Codegens a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index acee9134f..b0a9a30ab 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -10,6 +10,7 @@ use crate::value::Value;
use rustc_ast::Mutability;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
@@ -236,7 +237,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Scalar::Int(int) => {
let data = int.assert_bits(layout.size(self));
let llval = self.const_uint_big(self.type_ix(bitsize), data);
- if layout.primitive() == Pointer {
+ if matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.const_bitcast(llval, llty)
@@ -252,8 +253,13 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
_ => self.static_addr_of(init, alloc.align, None),
};
- if !self.sess().fewer_names() {
- llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
+ if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() {
+ let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ alloc.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish::<u128>()
+ });
+ llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes());
}
(value, AddressSpace::DATA)
}
@@ -284,7 +290,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
1,
)
};
- if layout.primitive() != Pointer {
+ if !matches!(layout.primitive(), Pointer(_)) {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.const_bitcast(llval, llty)
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 16467b614..9116e71be 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -3,7 +3,6 @@ use crate::common::{self, CodegenCx};
use crate::debuginfo;
use crate::errors::{InvalidMinimumAlignment, SymbolAlreadyDefined};
use crate::llvm::{self, True};
-use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
@@ -13,7 +12,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
- read_target_uint, Allocation, ConstAllocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+ read_target_uint, Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer,
Scalar as InterpScalar,
};
use rustc_middle::mir::mono::MonoItem;
@@ -21,9 +20,7 @@ use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_session::config::Lto;
-use rustc_target::abi::{
- AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
-};
+use rustc_target::abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange};
use std::ops::Range;
pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
@@ -58,13 +55,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
// to avoid the cost of generating large complex const expressions.
// For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
// and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
- let max = if llvm_util::get_version() < (14, 0, 0) {
- // Generating partially-uninit consts inhibits optimizations in LLVM < 14.
- // See https://github.com/rust-lang/rust/issues/84565.
- 1
- } else {
- cx.sess().opts.unstable_opts.uninit_const_chunk_threshold
- };
+ let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
if allow_uninit_chunks {
@@ -98,12 +89,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
.expect("const_alloc_to_llvm: could not read relocation pointer")
as u64;
- let address_space = match cx.tcx.global_alloc(alloc_id) {
- GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
- GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
- AddressSpace::DATA
- }
- };
+ let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
llvals.push(cx.scalar_to_backend(
InterpScalar::from_pointer(
@@ -111,7 +97,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
&cx.tcx,
),
Scalar::Initialized {
- value: Primitive::Pointer,
+ value: Primitive::Pointer(address_space),
valid_range: WrappingRange::full(dl.pointer_size),
},
cx.type_i8p_ext(address_space),
@@ -535,7 +521,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
// The semantics of #[used] in Rust only require the symbol to make it into the
// object file. It is explicitly allowed for the linker to strip the symbol if it
- // is dead, which means we are allowed use `llvm.compiler.used` instead of
+ // is dead, which means we are allowed to use `llvm.compiler.used` instead of
// `llvm.used` here.
//
// Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
@@ -546,7 +532,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
// That said, we only ever emit these when compiling for ELF targets, unless
// `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
// on other targets, in particular MachO targets have *their* static constructor
- // lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However,
+ // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
// that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
// so we don't need to take care of it here.
self.add_compiler_used_global(g);
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index d9ccba07a..3d29968d5 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -143,24 +143,15 @@ pub unsafe fn create_module<'ll>(
let mut target_data_layout = sess.target.data_layout.to_string();
let llvm_version = llvm_util::get_version();
- if llvm_version < (14, 0, 0) {
- if sess.target.llvm_target == "i686-pc-windows-msvc"
- || sess.target.llvm_target == "i586-pc-windows-msvc"
- {
- target_data_layout =
- "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
- .to_string();
- }
- if sess.target.arch == "wasm32" {
- target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
- }
- }
if llvm_version < (16, 0, 0) {
if sess.target.arch == "s390x" {
+ // LLVM 16 data layout changed to always set 64-bit vector alignment,
+ // which is conditional in earlier LLVM versions.
+ // https://reviews.llvm.org/D131158 for the discussion.
target_data_layout = target_data_layout.replace("-v128:64", "");
- }
-
- if sess.target.arch == "riscv64" {
+ } else if sess.target.arch == "riscv64" {
+ // LLVM 16 introduced this change so as to produce more efficient code.
+ // See https://reviews.llvm.org/D116735 for the discussion.
target_data_layout = target_data_layout.replace("-n32:64-", "-n64-");
}
}
@@ -191,7 +182,7 @@ pub unsafe fn create_module<'ll>(
//
// FIXME(#34960)
let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
- let custom_llvm_used = cfg_llvm_root.trim() != "";
+ let custom_llvm_used = !cfg_llvm_root.trim().is_empty();
if !custom_llvm_used && target_data_layout != llvm_data_layout {
bug!(
@@ -416,12 +407,8 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
- let coverage_cx = if tcx.sess.instrument_coverage() {
- let covctx = coverageinfo::CrateCoverageContext::new();
- Some(covctx)
- } else {
- None
- };
+ let coverage_cx =
+ tcx.sess.instrument_coverage().then(coverageinfo::CrateCoverageContext::new);
let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
@@ -533,14 +520,9 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
- ty::Instance::resolve(
- tcx,
- ty::ParamEnv::reveal_all(),
- def_id,
- tcx.intern_substs(&[]),
- )
- .unwrap()
- .unwrap(),
+ ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
+ .unwrap()
+ .unwrap(),
),
_ => {
let name = if wants_msvc_seh(self.sess()) {
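// Illustrative sketch (not part of this patch): the version-gated data-layout
// fixups above. The target spec ships the layout string expected by current LLVM,
// and rustc rewrites it for older LLVM versions before module creation. The
// layout string in `main` is abbreviated, not the real riscv64 layout.
fn adjust_data_layout(llvm_version: (u32, u32, u32), arch: &str, layout: &str) -> String {
    let mut layout = layout.to_string();
    if llvm_version < (16, 0, 0) {
        if arch == "s390x" {
            // LLVM 16 always sets 64-bit vector alignment (https://reviews.llvm.org/D131158).
            layout = layout.replace("-v128:64", "");
        } else if arch == "riscv64" {
            // LLVM 16 widened the native integer set (https://reviews.llvm.org/D116735).
            layout = layout.replace("-n32:64-", "-n64-");
        }
    }
    layout
}

fn main() {
    let fixed = adjust_data_layout((15, 0, 0), "riscv64", "e-m:e-p:64:64-n32:64-S128");
    assert_eq!(fixed, "e-m:e-p:64:64-n64-S128");
}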
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 22c61248b..240a9d2f3 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,6 +1,5 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
-use crate::errors::InstrumentCoverageRequiresLLVM12;
use crate::llvm;
use llvm::coverageinfo::CounterMappingRegion;
@@ -19,8 +18,8 @@ use std::ffi::CString;
/// Generates and exports the Coverage Map.
///
-/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
-/// 5 (LLVM 12, only) and 6 (zero-based encoded as 4 and 5, respectively), as defined at
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version
+/// 6 (zero-based encoded as 5), as defined at
/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
/// bundled with Rust's fork of LLVM.
@@ -33,13 +32,10 @@ use std::ffi::CString;
pub fn finalize(cx: &CodegenCx<'_, '_>) {
let tcx = cx.tcx;
- // Ensure the installed version of LLVM supports at least Coverage Map
- // Version 5 (encoded as a zero-based value: 4), which was introduced with
- // LLVM 12.
+ // Ensure the installed version of LLVM supports Coverage Map Version 6
+ // (encoded as a zero-based value: 5), which was introduced with LLVM 13.
let version = coverageinfo::mapping_version();
- if version < 4 {
- tcx.sess.emit_fatal(InstrumentCoverageRequiresLLVM12);
- }
+ assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync");
debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
@@ -61,7 +57,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
return;
}
- let mut mapgen = CoverageMapGenerator::new(tcx, version);
+ let mut mapgen = CoverageMapGenerator::new(tcx);
// Encode coverage mappings and generate function records
let mut function_data = Vec::new();
@@ -124,25 +120,18 @@ struct CoverageMapGenerator {
}
impl CoverageMapGenerator {
- fn new(tcx: TyCtxt<'_>, version: u32) -> Self {
+ fn new(tcx: TyCtxt<'_>) -> Self {
let mut filenames = FxIndexSet::default();
- if version >= 5 {
- // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
- // requires setting the first filename to the compilation directory.
- // Since rustc generates coverage maps with relative paths, the
- // compilation directory can be combined with the relative paths
- // to get absolute paths, if needed.
- let working_dir = tcx
- .sess
- .opts
- .working_dir
- .remapped_path_if_available()
- .to_string_lossy()
- .to_string();
- let c_filename =
- CString::new(working_dir).expect("null error converting filename to C string");
- filenames.insert(c_filename);
- }
+ // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
+ // requires setting the first filename to the compilation directory.
+ // Since rustc generates coverage maps with relative paths, the
+ // compilation directory can be combined with the relative paths
+ // to get absolute paths, if needed.
+ let working_dir =
+ tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy().to_string();
+ let c_filename =
+ CString::new(working_dir).expect("null error converting filename to C string");
+ filenames.insert(c_filename);
Self { filenames }
}
@@ -306,9 +295,8 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator
) {
return None;
- } else if ignore_unused_generics
- && tcx.generics_of(def_id).requires_monomorphization(tcx)
- {
+ }
+ if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
return None;
}
Some(local_def_id.to_def_id())
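// Illustrative sketch (not part of this patch): with only Coverage Mapping Format
// version 6 (zero-based encoding 5) supported, the filename table always starts
// with the compilation directory, and the relative paths recorded after it can be
// joined against entry 0 to recover absolute paths. Paths here are hypothetical.
use std::path::{Path, PathBuf};

fn coverage_filenames(working_dir: &Path, files: &[&str]) -> Vec<PathBuf> {
    // Entry 0 is unconditionally the compilation directory.
    let mut table = vec![working_dir.to_path_buf()];
    table.extend(files.iter().map(|f| PathBuf::from(*f)));
    table
}

fn main() {
    let table = coverage_filenames(Path::new("/checkout"), &["src/lib.rs", "src/main.rs"]);
    assert_eq!(table[0], Path::new("/checkout"));
    // A coverage consumer resolves relative entries against entry 0.
    assert_eq!(table[0].join(&table[1]), Path::new("/checkout/src/lib.rs"));
}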
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index ace15cfb0..3dc0ac033 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -27,8 +27,6 @@ use rustc_middle::ty::Instance;
use std::cell::RefCell;
use std::ffi::CString;
-use std::iter;
-
pub mod mapgen;
const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
@@ -201,7 +199,7 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
tcx.symbol_name(instance).name,
cx.fn_abi_of_fn_ptr(
ty::Binder::dummy(tcx.mk_fn_sig(
- iter::once(tcx.mk_unit()),
+ [tcx.mk_unit()],
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index b6eb5ee18..c1b3f34e5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -132,7 +132,7 @@ fn build_fixed_size_array_di_node<'ll, 'tcx>(
let (size, align) = cx.size_and_align_of(array_type);
- let upper_bound = len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+ let upper_bound = len.eval_target_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
let subrange =
unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
@@ -1499,6 +1499,11 @@ pub fn create_vtable_di_node<'ll, 'tcx>(
return;
}
+ // When full debuginfo is enabled, we want to try and prevent vtables from being
+ // merged. Otherwise debuggers will have a hard time mapping from dyn pointer
+ // to concrete type.
+ llvm::SetUnnamedAddress(vtable, llvm::UnnamedAddr::No);
+
let vtable_name =
compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 564ab351b..54e850f25 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -122,7 +122,8 @@ fn tag_base_type<'ll, 'tcx>(
Primitive::Int(t, _) => t,
Primitive::F32 => Integer::I32,
Primitive::F64 => Integer::I64,
- Primitive::Pointer => {
+ // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+ Primitive::Pointer(_) => {
// If the niche is the NULL value of a reference, then `discr_enum_ty` will be
// a RawPtr. CodeView doesn't know what to do with enums whose base type is a
// pointer so we fix this up to just be `usize`.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index ca7a07d83..5392534cf 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -27,7 +27,7 @@ use rustc_index::vec::IndexVec;
use rustc_middle::mir;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitable};
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt};
use rustc_session::config::{self, DebugInfo};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
@@ -508,7 +508,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
- cx.tcx.type_of(impl_def_id),
+ cx.tcx.type_of(impl_def_id).skip_binder(),
);
// Only "class" methods are generally understood by LLVM,
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index b46209972..bae88d942 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -1,10 +1,12 @@
use std::borrow::Cow;
-
-use rustc_errors::fluent;
-use rustc_errors::DiagnosticBuilder;
-use rustc_errors::ErrorGuaranteed;
-use rustc_errors::Handler;
-use rustc_errors::IntoDiagnostic;
+use std::ffi::CString;
+use std::path::Path;
+
+use crate::fluent_generated as fluent;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{
+ DiagnosticBuilder, EmissionGuarantee, ErrorGuaranteed, Handler, IntoDiagnostic,
+};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::Span;
@@ -26,9 +28,9 @@ pub(crate) struct UnknownCTargetFeature<'a> {
#[derive(Subdiagnostic)]
pub(crate) enum PossibleFeature<'a> {
- #[help(possible_feature)]
+ #[help(codegen_llvm_possible_feature)]
Some { rust_feature: &'a str },
- #[help(consider_filing_feature_request)]
+ #[help(codegen_llvm_consider_filing_feature_request)]
None,
}
@@ -40,10 +42,6 @@ pub(crate) struct ErrorCreatingImportLibrary<'a> {
}
#[derive(Diagnostic)]
-#[diag(codegen_llvm_instrument_coverage_requires_llvm_12)]
-pub(crate) struct InstrumentCoverageRequiresLLVM12;
-
-#[derive(Diagnostic)]
#[diag(codegen_llvm_symbol_already_defined)]
pub(crate) struct SymbolAlreadyDefined<'a> {
#[primary_span]
@@ -85,10 +83,18 @@ pub(crate) struct DlltoolFailImportLibrary<'a> {
#[note]
pub(crate) struct DynamicLinkingWithLTO;
-#[derive(Diagnostic)]
-#[diag(codegen_llvm_fail_parsing_target_machine_config_to_target_machine)]
-pub(crate) struct FailParsingTargetMachineConfigToTargetMachine {
- pub error: String,
+pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>);
+
+impl<EM: EmissionGuarantee> IntoDiagnostic<'_, EM> for ParseTargetMachineConfig<'_> {
+ fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> {
+ let diag: DiagnosticBuilder<'_, EM> = self.0.into_diagnostic(sess);
+ let (message, _) = diag.styled_message().first().expect("`LlvmError` with no message");
+ let message = sess.eagerly_translate_to_string(message.clone(), diag.args());
+
+ let mut diag = sess.struct_diagnostic(fluent::codegen_llvm_parse_target_machine_config);
+ diag.set_arg("error", message);
+ diag
+ }
}
pub(crate) struct TargetFeatureDisableOrEnable<'a> {
@@ -114,3 +120,99 @@ impl IntoDiagnostic<'_, ErrorGuaranteed> for TargetFeatureDisableOrEnable<'_> {
diag
}
}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_bitcode_from_rlib)]
+pub(crate) struct LtoBitcodeFromRlib {
+ pub llvm_err: String,
+}
+
+#[derive(Diagnostic)]
+pub enum LlvmError<'a> {
+ #[diag(codegen_llvm_write_output)]
+ WriteOutput { path: &'a Path },
+ #[diag(codegen_llvm_target_machine)]
+ CreateTargetMachine { triple: SmallCStr },
+ #[diag(codegen_llvm_run_passes)]
+ RunLlvmPasses,
+ #[diag(codegen_llvm_serialize_module)]
+ SerializeModule { name: &'a str },
+ #[diag(codegen_llvm_write_ir)]
+ WriteIr { path: &'a Path },
+ #[diag(codegen_llvm_prepare_thin_lto_context)]
+ PrepareThinLtoContext,
+ #[diag(codegen_llvm_load_bitcode)]
+ LoadBitcode { name: CString },
+ #[diag(codegen_llvm_write_thinlto_key)]
+ WriteThinLtoKey { err: std::io::Error },
+ #[diag(codegen_llvm_multiple_source_dicompileunit)]
+ MultipleSourceDiCompileUnit,
+ #[diag(codegen_llvm_prepare_thin_lto_module)]
+ PrepareThinLtoModule,
+ #[diag(codegen_llvm_parse_bitcode)]
+ ParseBitcode,
+}
+
+pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String);
+
+impl<EM: EmissionGuarantee> IntoDiagnostic<'_, EM> for WithLlvmError<'_> {
+ fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> {
+ use LlvmError::*;
+ let msg_with_llvm_err = match &self.0 {
+ WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err,
+ CreateTargetMachine { .. } => fluent::codegen_llvm_target_machine_with_llvm_err,
+ RunLlvmPasses => fluent::codegen_llvm_run_passes_with_llvm_err,
+ SerializeModule { .. } => fluent::codegen_llvm_serialize_module_with_llvm_err,
+ WriteIr { .. } => fluent::codegen_llvm_write_ir_with_llvm_err,
+ PrepareThinLtoContext => fluent::codegen_llvm_prepare_thin_lto_context_with_llvm_err,
+ LoadBitcode { .. } => fluent::codegen_llvm_load_bitcode_with_llvm_err,
+ WriteThinLtoKey { .. } => fluent::codegen_llvm_write_thinlto_key_with_llvm_err,
+ MultipleSourceDiCompileUnit => {
+ fluent::codegen_llvm_multiple_source_dicompileunit_with_llvm_err
+ }
+ PrepareThinLtoModule => fluent::codegen_llvm_prepare_thin_lto_module_with_llvm_err,
+ ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err,
+ };
+ let mut diag = self.0.into_diagnostic(sess);
+ diag.set_primary_message(msg_with_llvm_err);
+ diag.set_arg("llvm_err", self.1);
+ diag
+ }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_optimization_diag)]
+pub(crate) struct FromLlvmOptimizationDiag<'a> {
+ pub filename: &'a str,
+ pub line: std::ffi::c_uint,
+ pub column: std::ffi::c_uint,
+ pub pass_name: &'a str,
+ pub message: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_diag)]
+pub(crate) struct FromLlvmDiag {
+ pub message: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_write_bytecode)]
+pub(crate) struct WriteBytecode<'a> {
+ pub path: &'a Path,
+ pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_copy_bitcode)]
+pub(crate) struct CopyBitcode {
+ pub err: std::io::Error,
+}
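// Illustrative sketch (not part of this patch): the `WithLlvmError` wrapper
// pattern. Each structured error has a base message plus a `*_with_llvm_err`
// variant, and the wrapper selects the latter and attaches whatever string LLVM
// last reported. Fluent and `IntoDiagnostic` are modeled with plain strings here.

enum LlvmError<'a> {
    WriteOutput { path: &'a str },
    RunLlvmPasses,
}

impl LlvmError<'_> {
    fn base_message(&self) -> String {
        match self {
            LlvmError::WriteOutput { path } => format!("could not write output to {path}"),
            LlvmError::RunLlvmPasses => "failed to run LLVM passes".to_string(),
        }
    }
}

struct WithLlvmError<'a>(LlvmError<'a>, String);

impl WithLlvmError<'_> {
    fn render(&self) -> String {
        // The real impl swaps in the `*_with_llvm_err` Fluent message and sets
        // its `llvm_err` argument; string concatenation plays that role here.
        format!("{}: {}", self.0.base_message(), self.1)
    }
}

fn main() {
    let err = WithLlvmError(LlvmError::RunLlvmPasses, "unknown LLVM error".to_string());
    assert_eq!(err.render(), "failed to run LLVM passes: unknown LLVM error");
}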
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a6a75eff9..39afb4af6 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -22,7 +22,6 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
use rustc_target::spec::{HasTargetSpec, PanicStrategy};
use std::cmp::Ordering;
-use std::iter;
fn get_simple_intrinsic<'ll>(
cx: &CodegenCx<'ll, '_>,
@@ -149,7 +148,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
emit_va_arg(self, args[0], ret_ty)
}
}
- Primitive::F64 | Primitive::Pointer => {
+ Primitive::F64 | Primitive::Pointer(_) => {
emit_va_arg(self, args[0], ret_ty)
}
// `va_arg` should never be used with the return type f32.
@@ -798,7 +797,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
// `unsafe fn(*mut i8) -> ()`
let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
- iter::once(i8p),
+ [i8p],
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
@@ -806,7 +805,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
)));
// `unsafe fn(*mut i8, *mut i8) -> ()`
let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
- [i8p, i8p].iter().cloned(),
+ [i8p, i8p],
tcx.mk_unit(),
false,
hir::Unsafety::Unsafe,
@@ -814,7 +813,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
)));
// `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
- [try_fn_ty, i8p, catch_fn_ty].into_iter(),
+ [try_fn_ty, i8p, catch_fn_ty],
tcx.types.i32,
false,
hir::Unsafety::Unsafe,
@@ -877,7 +876,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
ty::Array(elem, len)
if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
- && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
== Some(expected_bytes) =>
{
let place = PlaceRef::alloca(bx, args[0].layout);
@@ -957,9 +956,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
// version of this intrinsic.
match args[2].layout.ty.kind() {
ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
- len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
- span_bug!(span, "could not evaluate shuffle index array length")
- })
+ len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+ || span_bug!(span, "could not evaluate shuffle index array length"),
+ )
}
_ => return_error!(InvalidMonomorphization::SimdShuffle {
span,
@@ -1123,7 +1122,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}
ty::Array(elem, len)
if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
- && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
== Some(expected_bytes) =>
{
// Zero-extend iN to the array length:
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 246e82545..c41e74c51 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -5,11 +5,12 @@
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(extern_types)]
#![feature(hash_raw_entry)]
+#![feature(iter_intersperse)]
#![feature(let_chains)]
-#![feature(extern_types)]
+#![feature(never_type)]
#![feature(once_cell)]
-#![feature(iter_intersperse)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
@@ -22,7 +23,7 @@ extern crate tracing;
use back::write::{create_informational_target_machine, create_target_machine};
-use errors::FailParsingTargetMachineConfigToTargetMachine;
+use errors::ParseTargetMachineConfig;
pub use llvm_util::target_features;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
@@ -33,7 +34,8 @@ use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{ErrorGuaranteed, FatalError, Handler};
+use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
+use rustc_macros::fluent_messages;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::query::Providers;
@@ -82,6 +84,8 @@ mod type_of;
mod va_arg;
mod value;
+fluent_messages! { "../locales/en-US.ftl" }
+
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
@@ -169,6 +173,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type TargetMachine = &'static mut llvm::TargetMachine;
+ type TargetMachineError = crate::errors::LlvmError<'static>;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
@@ -244,6 +249,10 @@ impl LlvmCodegenBackend {
}
impl CodegenBackend for LlvmCodegenBackend {
+ fn locale_resource(&self) -> &'static str {
+ crate::DEFAULT_LOCALE_RESOURCE
+ }
+
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
@@ -416,8 +425,7 @@ impl ModuleLlvm {
let tm = match (cgcx.tm_factory)(tm_factory_config) {
Ok(m) => m,
Err(e) => {
- handler.emit_err(FailParsingTargetMachineConfigToTargetMachine { error: e });
- return Err(FatalError);
+ return Err(handler.emit_almost_fatal(ParseTargetMachineConfig(e)));
}
};
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 8b4861962..253c2ca7c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -482,6 +482,8 @@ pub struct SanitizerOptions {
pub sanitize_thread: bool,
pub sanitize_hwaddress: bool,
pub sanitize_hwaddress_recover: bool,
+ pub sanitize_kernel_address: bool,
+ pub sanitize_kernel_address_recover: bool,
}
/// LLVMRelocMode
@@ -1812,8 +1814,6 @@ extern "C" {
/// Creates a legacy pass manager -- only used for final codegen.
pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
- pub fn LLVMInitializePasses();
-
pub fn LLVMTimeTraceProfilerInitialize();
pub fn LLVMTimeTraceProfilerFinishThread();
@@ -2408,6 +2408,8 @@ extern "C" {
pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
pub fn LLVMRustModuleCost(M: &Module) -> u64;
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustModuleInstructionStats(M: &Module, Str: &RustString);
pub fn LLVMRustThinLTOBufferCreate(M: &Module, is_thin: bool) -> &'static mut ThinLTOBuffer;
pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 79b243f73..ba58a2e68 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -120,8 +120,6 @@ unsafe fn configure_llvm(sess: &Session) {
llvm::LLVMTimeTraceProfilerInitialize();
}
- llvm::LLVMInitializePasses();
-
rustc_llvm::initialize_available_targets();
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
@@ -152,13 +150,7 @@ pub fn time_trace_profiler_finish(file_name: &Path) {
pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
match (arch, s) {
- ("x86", "sse4.2") => {
- if get_version() >= (14, 0, 0) {
- smallvec!["sse4.2", "crc32"]
- } else {
- smallvec!["sse4.2"]
- }
- }
+ ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"],
("x86", "pclmulqdq") => smallvec!["pclmul"],
("x86", "rdrand") => smallvec!["rdrnd"],
("x86", "bmi1") => smallvec!["bmi"],
@@ -217,7 +209,7 @@ pub fn check_tied_features(
/// Must express features in the way Rust understands them
pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
let target_machine = create_informational_target_machine(sess);
- let mut features: Vec<Symbol> = supported_target_features(sess)
+ supported_target_features(sess)
.iter()
.filter_map(|&(feature, gate)| {
if sess.is_nightly_build() || allow_unstable || gate.is_none() {
@@ -237,16 +229,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
true
})
.map(|feature| Symbol::intern(feature))
- .collect();
-
- // LLVM 14 changed the ABI for i128 arguments to __float/__fix builtins on Win64
- // (see https://reviews.llvm.org/D110413). This unstable target feature is intended for use
- // by compiler-builtins, to export the builtins with the expected, LLVM-version-dependent ABI.
- // The target feature can be dropped once we no longer support older LLVM versions.
- if sess.is_nightly_build() && get_version() >= (14, 0, 0) {
- features.push(Symbol::intern("llvm14-builtins-abi"));
- }
- features
+ .collect()
}
pub fn print_version() {
@@ -494,11 +477,6 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
.flatten();
features.extend(feats);
- // FIXME: Move v8a to target definition list when earliest supported LLVM is 14.
- if get_version() >= (14, 0, 0) && sess.target.arch == "aarch64" {
- features.push("+v8a".into());
- }
-
if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
sess.emit_err(TargetFeatureDisableOrEnable {
features: f,
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index 76f692b20..d0ae36349 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -9,7 +9,7 @@ use rustc_hir::def_id::{DefId, LOCAL_CRATE};
pub use rustc_middle::mir::mono::MonoItem;
use rustc_middle::mir::mono::{Linkage, Visibility};
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
-use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
use rustc_session::config::CrateType;
use rustc_target::spec::RelocModel;
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 75cd5df97..e264ce78f 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,13 +1,12 @@
use crate::common::*;
use crate::context::TypeLowering;
-use crate::llvm_util::get_version;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Ty, TypeVisitable};
-use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+use rustc_target::abi::{Abi, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
use smallvec::{smallvec, SmallVec};
@@ -43,10 +42,8 @@ fn uncached_llvm_type<'a, 'tcx>(
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
- // For performance reasons we use names only when emitting LLVM IR. Unless we are on
- // LLVM < 14, where the use of unnamed types resulted in various issues, e.g., #76213,
- // #79564, and #79246.
- if get_version() < (14, 0, 0) || !cx.sess().fewer_names() =>
+ // For performance reasons we use names only when emitting LLVM IR.
+ if !cx.sess().fewer_names() =>
{
let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
if let (&ty::Adt(def, _), &Variants::Single { index }) =
@@ -157,7 +154,7 @@ fn struct_llfields<'a, 'tcx>(
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
}
- let field_remapping = if padding_used { Some(field_remapping) } else { None };
+ let field_remapping = padding_used.then_some(field_remapping);
(result, packed, field_remapping)
}
@@ -312,14 +309,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
Int(i, _) => cx.type_from_integer(i),
F32 => cx.type_f32(),
F64 => cx.type_f64(),
- Pointer => {
+ Pointer(address_space) => {
// If we know the alignment, pick something better than i8.
- let (pointee, address_space) =
- if let Some(pointee) = self.pointee_info_at(cx, offset) {
- (cx.type_pointee_for_align(pointee.align), pointee.address_space)
- } else {
- (cx.type_i8(), AddressSpace::DATA)
- };
+ let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ cx.type_pointee_for_align(pointee.align)
+ } else {
+ cx.type_i8()
+ };
cx.type_ptr_to_ext(pointee, address_space)
}
}
@@ -333,7 +329,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
) -> &'a Type {
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
- match self.ty.kind() {
+ match *self.ty.kind() {
ty::Ref(..) | ty::RawPtr(_) => {
return self.field(cx, index).llvm_type(cx);
}
@@ -343,6 +339,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
+ // `dyn* Trait` has the same ABI as `*mut dyn Trait`
+ ty::Dynamic(bounds, region, ty::DynStar) => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_dynamic(bounds, region, ty::Dyn));
+ return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
+ }
_ => {}
}
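// Illustrative sketch (not part of this patch): in `scalar_llvm_type_at`, the
// pointer's address space now comes from the scalar primitive itself
// (`Pointer(address_space)`) instead of being looked up from pointee info or
// defaulting to the data address space; only the pointee type still falls back to
// `i8`. The types below are stand-ins for LLVM types.

#[derive(Clone, Copy, Debug, PartialEq)]
struct AddressSpace(u32);

#[derive(Debug, PartialEq)]
enum LlvmTy {
    I8,
    PointeeForAlign(u64),
    Ptr(Box<LlvmTy>, AddressSpace),
}

fn pointer_llvm_type(known_pointee_align: Option<u64>, address_space: AddressSpace) -> LlvmTy {
    // If we know the alignment, pick something better than i8 (the role of
    // `type_pointee_for_align`); the address space is no longer guessed.
    let pointee = match known_pointee_align {
        Some(align) => LlvmTy::PointeeForAlign(align),
        None => LlvmTy::I8,
    };
    LlvmTy::Ptr(Box::new(pointee), address_space)
}

fn main() {
    let ty = pointer_llvm_type(None, AddressSpace(1));
    assert_eq!(ty, LlvmTy::Ptr(Box::new(LlvmTy::I8), AddressSpace(1)));
}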