From 9918693037dce8aa4bb6f08741b6812923486c18 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 19 Jun 2024 11:26:03 +0200 Subject: Merging upstream version 1.76.0+dfsg1. Signed-off-by: Daniel Baumann --- compiler/rustc_codegen_llvm/src/abi.rs | 46 +-- compiler/rustc_codegen_llvm/src/allocator.rs | 10 +- compiler/rustc_codegen_llvm/src/attributes.rs | 18 +- compiler/rustc_codegen_llvm/src/back/archive.rs | 2 +- compiler/rustc_codegen_llvm/src/back/lto.rs | 92 ++--- .../src/back/owned_target_machine.rs | 4 +- compiler/rustc_codegen_llvm/src/back/write.rs | 103 +++--- compiler/rustc_codegen_llvm/src/base.rs | 6 +- compiler/rustc_codegen_llvm/src/builder.rs | 27 +- compiler/rustc_codegen_llvm/src/callee.rs | 4 +- compiler/rustc_codegen_llvm/src/common.rs | 5 +- compiler/rustc_codegen_llvm/src/consts.rs | 18 +- compiler/rustc_codegen_llvm/src/context.rs | 73 ++-- .../rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 14 +- .../rustc_codegen_llvm/src/coverageinfo/mod.rs | 11 + compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 7 +- .../rustc_codegen_llvm/src/debuginfo/metadata.rs | 52 ++- .../src/debuginfo/metadata/enums/cpp_like.rs | 25 +- .../src/debuginfo/metadata/enums/mod.rs | 7 +- .../src/debuginfo/metadata/enums/native.rs | 11 +- compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 9 +- compiler/rustc_codegen_llvm/src/declare.rs | 4 +- compiler/rustc_codegen_llvm/src/errors.rs | 22 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 201 ++++++++++- compiler/rustc_codegen_llvm/src/lib.rs | 43 +-- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 394 +++++++++++---------- compiler/rustc_codegen_llvm/src/llvm_util.rs | 31 +- compiler/rustc_codegen_llvm/src/type_.rs | 4 - compiler/rustc_codegen_llvm/src/type_of.rs | 17 +- 29 files changed, 740 insertions(+), 520 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 6e3a4cae2..97dc40125 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -15,7 +15,6 @@ use rustc_middle::ty::layout::LayoutOf; pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; use rustc_middle::ty::Ty; use rustc_session::config; -use rustc_target::abi::call::ArgAbi; pub use rustc_target::abi::call::*; use rustc_target::abi::{self, HasDataLayout, Int}; pub use rustc_target::spec::abi::Abi; @@ -348,50 +347,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Direct(_) => { // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges), // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`, - // guarnateeing that we generate ABI-compatible LLVM IR. Things get tricky for - // aggregates... - if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) { - assert!( - arg.layout.is_sized(), - "`PassMode::Direct` for unsized type: {}", - arg.layout.ty - ); - // This really shouldn't happen, since `immediate_llvm_type` will use - // `layout.fields` to turn this Rust type into an LLVM type. This means all - // sorts of Rust type details leak into the ABI. However wasm sadly *does* - // currently use this mode so we have to allow it -- but we absolutely - // shouldn't let any more targets do that. - // (Also see .) - // - // The unstable abi `PtxKernel` also uses Direct for now. - // It needs to switch to something else before stabilization can happen. 
- // (See issue: https://github.com/rust-lang/rust/issues/117271) - assert!( - matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64") - || self.conv == Conv::PtxKernel, - "`PassMode::Direct` for aggregates only allowed on wasm and `extern \"ptx-kernel\"` fns\nProblematic type: {:#?}", - arg.layout, - ); - } + // guaranteeing that we generate ABI-compatible LLVM IR. arg.layout.immediate_llvm_type(cx) } PassMode::Pair(..) => { // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges), // so for ScalarPair we can easily be sure that we are generating ABI-compatible // LLVM IR. - assert!( - matches!(arg.layout.abi, abi::Abi::ScalarPair(..)), - "PassMode::Pair for type {}", - arg.layout.ty - ); llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true)); llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true)); continue; } - PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => { - // `Indirect` with metadata is only for unsized types, and doesn't work with - // on-stack passing. - assert!(arg.layout.is_unsized() && !on_stack); + PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { // Construct the type of a (wide) pointer to `ty`, and pass its two fields. // Any two ABI-compatible unsized types have the same metadata type and // moreover the same metadata value leads to the same dynamic size and @@ -402,13 +369,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true)); continue; } - PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => { - assert!(arg.layout.is_sized()); - cx.type_ptr() - } + PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(), PassMode::Cast { cast, pad_i32 } => { - // `Cast` means "transmute to `CastType`"; that only makes sense for sized types. 
- assert!(arg.layout.is_sized()); // add padding if *pad_i32 { llargument_tys.push(Reg::i32().llvm_type(cx)); @@ -530,7 +492,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Cast { cast, pad_i32: _ } => { cast.attrs.apply_attrs_to_callsite( llvm::AttributePlace::ReturnValue, - &bx.cx, + bx.cx, callsite, ); } diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index db5c1388e..58b3aa438 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -67,7 +67,7 @@ pub(crate) unsafe fn codegen( llcx, llmod, "__rust_alloc_error_handler", - &alloc_error_handler_name(alloc_error_handler_kind), + alloc_error_handler_name(alloc_error_handler_kind), &[usize, usize], // size, align None, true, @@ -76,7 +76,7 @@ pub(crate) unsafe fn codegen( // __rust_alloc_error_handler_should_panic let name = OomStrategy::SYMBOL; let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8); - if tcx.sess.target.default_hidden_visibility { + if tcx.sess.default_hidden_visibility() { llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden); } let val = tcx.sess.opts.unstable_opts.oom.should_panic(); @@ -85,7 +85,7 @@ pub(crate) unsafe fn codegen( let name = NO_ALLOC_SHIM_IS_UNSTABLE; let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8); - if tcx.sess.target.default_hidden_visibility { + if tcx.sess.default_hidden_visibility() { llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden); } let llval = llvm::LLVMConstInt(i8, 0, False); @@ -130,7 +130,7 @@ fn create_wrapper_function( None }; - if tcx.sess.target.default_hidden_visibility { + if tcx.sess.default_hidden_visibility() { llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); } if tcx.sess.must_emit_unwind_tables() { @@ -146,7 +146,7 @@ fn create_wrapper_function( } llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); - let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast()); + let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr().cast()); let llbuilder = llvm::LLVMCreateBuilderInContext(llcx); llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb); diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index b6c01545f..481741bb1 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -4,7 +4,7 @@ use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::ty::{self, TyCtxt}; -use rustc_session::config::OptLevel; +use rustc_session::config::{FunctionReturn, OptLevel}; use rustc_span::symbol::sym; use rustc_target::spec::abi::Abi; use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector}; @@ -118,6 +118,15 @@ pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attr Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value)) } +fn function_return_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { + let function_return_attr = match cx.sess().opts.unstable_opts.function_return { + FunctionReturn::Keep => return None, + FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern, + }; + + Some(function_return_attr.create_attr(cx.llcx)) +} + /// Tell LLVM what instrument function to insert. 
#[inline] fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> { @@ -136,7 +145,7 @@ fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attr attrs.push(llvm::CreateAttrStringValue( cx.llcx, "instrument-function-entry-inlined", - &mcount_name, + mcount_name, )); } if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray { @@ -331,8 +340,9 @@ pub fn from_fn_attrs<'ll, 'tcx>( to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile")); } - // FIXME: none of these three functions interact with source level attributes. + // FIXME: none of these functions interact with source level attributes. to_add.extend(frame_pointer_type_attr(cx)); + to_add.extend(function_return_attr(cx)); to_add.extend(instrument_function_attr(cx)); to_add.extend(nojumptables_attr(cx)); to_add.extend(probestack_attr(cx)); @@ -459,7 +469,7 @@ pub fn from_fn_attrs<'ll, 'tcx>( // If this function is an import from the environment but the wasm // import has a specific module/name, apply them here. if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) { - to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", &module)); + to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module)); let name = codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id())); diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index f33075a88..cf47c94a8 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -68,7 +68,7 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> { ) -> io::Result<()> { let mut archive = archive.to_path_buf(); if self.sess.target.llvm_target.contains("-apple-macosx") { - if let Some(new_archive) = try_extract_macho_fat_archive(&self.sess, &archive)? { + if let Some(new_archive) = try_extract_macho_fat_archive(self.sess, &archive)? 
{ archive = new_archive } } diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index db297425b..42bd86870 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -14,7 +14,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::memmap::Mmap; -use rustc_errors::{FatalError, Handler}; +use rustc_errors::{DiagCtxt, FatalError}; use rustc_hir::def_id::LOCAL_CRATE; use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; @@ -47,7 +47,7 @@ pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { fn prepare_lto( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, ) -> Result<(Vec, Vec<(SerializedModule, CString)>), FatalError> { let export_threshold = match cgcx.lto { // We're just doing LTO for our one crate @@ -84,23 +84,23 @@ fn prepare_lto( // Make sure we actually can run LTO for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { - diag_handler.emit_err(LtoDisallowed); + dcx.emit_err(LtoDisallowed); return Err(FatalError); } else if *crate_type == CrateType::Dylib { if !cgcx.opts.unstable_opts.dylib_lto { - diag_handler.emit_err(LtoDylib); + dcx.emit_err(LtoDylib); return Err(FatalError); } } else if *crate_type == CrateType::ProcMacro { if !cgcx.opts.unstable_opts.dylib_lto { - diag_handler.emit_err(LtoProcMacro); + dcx.emit_err(LtoProcMacro); return Err(FatalError); } } } if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto { - diag_handler.emit_err(DynamicLinkingWithLTO); + dcx.emit_err(DynamicLinkingWithLTO); return Err(FatalError); } @@ -138,7 +138,7 @@ fn prepare_lto( upstream_modules.push((module, CString::new(name).unwrap())); } Err(e) => { - diag_handler.emit_err(e); + dcx.emit_err(e); return Err(FatalError); } } @@ -200,18 +200,11 @@ pub(crate) fn run_fat( modules: Vec>, cached_modules: Vec<(SerializedModule, WorkProduct)>, ) -> Result, FatalError> { - let diag_handler = cgcx.create_diag_handler(); - let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?; + let dcx = cgcx.create_dcx(); + let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &dcx)?; let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>(); - fat_lto( - cgcx, - &diag_handler, - modules, - cached_modules, - upstream_modules, - &symbols_below_threshold, - ) + fat_lto(cgcx, &dcx, modules, cached_modules, upstream_modules, &symbols_below_threshold) } /// Performs thin LTO by performing necessary global analysis and returning two @@ -222,8 +215,8 @@ pub(crate) fn run_thin( modules: Vec<(String, ThinBuffer)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, ) -> Result<(Vec>, Vec), FatalError> { - let diag_handler = cgcx.create_diag_handler(); - let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?; + let dcx = cgcx.create_dcx(); + let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &dcx)?; let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>(); if cgcx.opts.cg.linker_plugin_lto.enabled() { @@ -232,14 +225,7 @@ pub(crate) fn run_thin( is deferred to the linker" ); } - thin_lto( - cgcx, - &diag_handler, - modules, - upstream_modules, - cached_modules, - &symbols_below_threshold, - ) + thin_lto(cgcx, &dcx, modules, upstream_modules, cached_modules, 
&symbols_below_threshold) } pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBuffer) { @@ -250,7 +236,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu fn fat_lto( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, modules: Vec>, cached_modules: Vec<(SerializedModule, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule, CString)>, @@ -316,7 +302,7 @@ fn fat_lto( let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); ModuleCodegen { - module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?, + module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?, name: name.into_string().unwrap(), kind: ModuleKind::Regular, } @@ -333,13 +319,8 @@ fn fat_lto( // The linking steps below may produce errors and diagnostics within LLVM // which we'd like to handle and print, so set up our diagnostic handlers // (which get unregistered when they go out of scope below). - let _handler = DiagnosticHandlers::new( - cgcx, - diag_handler, - llcx, - &module, - CodegenDiagnosticsStage::LTO, - ); + let _handler = + DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO); // For all other modules we codegened we'll need to link them into our own // bitcode. All modules were codegened in their own LLVM context, however, @@ -367,9 +348,7 @@ fn fat_lto( }); info!("linking {:?}", name); let data = bc_decoded.data(); - linker - .add(data) - .map_err(|()| write::llvm_err(diag_handler, LlvmError::LoadBitcode { name }))?; + linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?; serialized_bitcode.push(bc_decoded); } drop(linker); @@ -452,7 +431,7 @@ impl Drop for Linker<'_> { /// they all go out of scope. fn thin_lto( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, @@ -527,7 +506,7 @@ fn thin_lto( symbols_below_threshold.as_ptr(), symbols_below_threshold.len() as u32, ) - .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::PrepareThinLtoContext))?; + .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?; let data = ThinData(data); @@ -599,7 +578,7 @@ fn thin_lto( // session, overwriting the previous serialized data (if any). 
if let Some(path) = key_map_path { if let Err(err) = curr_key_map.save_to_file(&path) { - return Err(write::llvm_err(diag_handler, LlvmError::WriteThinLtoKey { err })); + return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err })); } } @@ -609,7 +588,7 @@ fn thin_lto( pub(crate) fn run_pass_manager( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: &mut ModuleCodegen, thin: bool, ) -> Result<(), FatalError> { @@ -631,13 +610,13 @@ pub(crate) fn run_pass_manager( llvm::LLVMRustAddModuleFlag( module.module_llvm.llmod(), llvm::LLVMModFlagBehavior::Error, - "LTOPostLink\0".as_ptr().cast(), + c"LTOPostLink".as_ptr().cast(), 1, ); } let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO }; let opt_level = config.opt_level.unwrap_or(config::OptLevel::No); - write::llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage)?; + write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage)?; } debug!("lto done"); Ok(()) @@ -721,11 +700,11 @@ pub unsafe fn optimize_thin_module( thin_module: ThinModule, cgcx: &CodegenContext, ) -> Result, FatalError> { - let diag_handler = cgcx.create_diag_handler(); + let dcx = cgcx.create_dcx(); let module_name = &thin_module.shared.module_names[thin_module.idx]; let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap()); - let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, e))?; + let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?; // Right now the implementation we've got only works over serialized // modules, so we create a fresh new LLVM context and parse the module @@ -733,7 +712,7 @@ pub unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _; + let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &dcx)? 
as *const _; let mut module = ModuleCodegen { module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) }, name: thin_module.name().to_string(), @@ -756,7 +735,7 @@ pub unsafe fn optimize_thin_module( let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) { - return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule)); + return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule)); } save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); } @@ -766,7 +745,7 @@ pub unsafe fn optimize_thin_module( .prof .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name()); if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { - return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule)); + return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule)); } save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); } @@ -776,7 +755,7 @@ pub unsafe fn optimize_thin_module( .prof .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name()); if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { - return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule)); + return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule)); } save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); } @@ -785,7 +764,7 @@ pub unsafe fn optimize_thin_module( let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name()); if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) { - return Err(write::llvm_err(&diag_handler, LlvmError::PrepareThinLtoModule)); + return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule)); } save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); } @@ -797,7 +776,7 @@ pub unsafe fn optimize_thin_module( // little differently. { info!("running thin lto passes over {}", module.name); - run_pass_manager(cgcx, &diag_handler, &mut module, true)?; + run_pass_manager(cgcx, &dcx, &mut module, true)?; save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); } } @@ -816,6 +795,9 @@ impl ThinLTOKeysMap { use std::io::Write; let file = File::create(path)?; let mut writer = io::BufWriter::new(file); + // The entries are loaded back into a hash map in `load_from_file()`, so + // the order in which we write them to file here does not matter. 
+ #[allow(rustc::potential_query_instability)] for (module, key) in &self.keys { writeln!(writer, "{module} {key}")?; } @@ -865,10 +847,10 @@ pub fn parse_module<'a>( cx: &'a llvm::Context, name: &CStr, data: &[u8], - diag_handler: &Handler, + dcx: &DiagCtxt, ) -> Result<&'a llvm::Module, FatalError> { unsafe { llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()) - .ok_or_else(|| write::llvm_err(diag_handler, LlvmError::ParseBitcode)) + .ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode)) } } diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index 36484c3c3..28a88dd2e 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -39,7 +39,7 @@ impl OwnedTargetMachine { split_dwarf_file: &CStr, output_obj_file: &CStr, debug_info_compression: &CStr, - force_emulated_tls: bool, + use_emulated_tls: bool, args_cstr_buff: &[u8], ) -> Result> { assert!(args_cstr_buff.len() > 0); @@ -71,7 +71,7 @@ impl OwnedTargetMachine { split_dwarf_file.as_ptr(), output_obj_file.as_ptr(), debug_info_compression.as_ptr(), - force_emulated_tls, + use_emulated_tls, args_cstr_buff.as_ptr() as *const c_char, args_cstr_buff.len(), ) diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 9d5204034..074188db9 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -26,14 +26,14 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_errors::{FatalError, Handler, Level}; +use rustc_errors::{DiagCtxt, FatalError, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; use rustc_middle::ty::TyCtxt; use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath}; use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::InnerSpan; -use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo}; +use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel}; use crate::llvm::diagnostic::OptimizationDiagnosticKind; use libc::{c_char, c_int, c_uint, c_void, size_t}; @@ -45,15 +45,15 @@ use std::slice; use std::str; use std::sync::Arc; -pub fn llvm_err<'a>(handler: &rustc_errors::Handler, err: LlvmError<'a>) -> FatalError { +pub fn llvm_err<'a>(dcx: &rustc_errors::DiagCtxt, err: LlvmError<'a>) -> FatalError { match llvm::last_error() { - Some(llvm_err) => handler.emit_almost_fatal(WithLlvmError(err, llvm_err)), - None => handler.emit_almost_fatal(err), + Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)), + None => dcx.emit_almost_fatal(err), } } pub fn write_output_file<'ll>( - handler: &rustc_errors::Handler, + dcx: &rustc_errors::DiagCtxt, target: &'ll llvm::TargetMachine, pm: &llvm::PassManager<'ll>, m: &'ll llvm::Module, @@ -93,9 +93,7 @@ pub fn write_output_file<'ll>( } } - result - .into_result() - .map_err(|()| llvm_err(handler, LlvmError::WriteOutput { path: output })) + result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output })) } } @@ -105,7 +103,7 @@ pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine // system/tcx is set up. 
let features = llvm_util::global_llvm_features(sess, false); target_machine_factory(sess, config::OptLevel::No, &features)(config) - .unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise()) + .unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise()) } pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { @@ -124,11 +122,11 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMach let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }; target_machine_factory( - &tcx.sess, + tcx.sess, tcx.backend_optimization_level(()), tcx.global_backend_features(()), )(config) - .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), err).raise()) + .unwrap_or_else(|err| llvm_err(tcx.sess.dcx(), err).raise()) } pub fn to_llvm_opt_settings( @@ -223,7 +221,7 @@ pub fn target_machine_factory( let path_mapping = sess.source_map().path_mapping().clone(); - let force_emulated_tls = sess.target.force_emulated_tls; + let use_emulated_tls = matches!(sess.tls_model(), TlsModel::Emulated); // copy the exe path, followed by path all into one buffer // null terminating them so we can use them as null terminated strings @@ -297,7 +295,7 @@ pub fn target_machine_factory( &split_dwarf_file, &output_obj_file, &debuginfo_compression, - force_emulated_tls, + use_emulated_tls, &args_cstr_buff, ) }) @@ -332,7 +330,7 @@ pub enum CodegenDiagnosticsStage { } pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), + data: *mut (&'a CodegenContext, &'a DiagCtxt), llcx: &'a llvm::Context, old_handler: Option<&'a llvm::DiagnosticHandler>, } @@ -340,7 +338,7 @@ pub struct DiagnosticHandlers<'a> { impl<'a> DiagnosticHandlers<'a> { pub fn new( cgcx: &'a CodegenContext, - handler: &'a Handler, + dcx: &'a DiagCtxt, llcx: &'a llvm::Context, module: &ModuleCodegen, stage: CodegenDiagnosticsStage, @@ -375,7 +373,7 @@ impl<'a> DiagnosticHandlers<'a> { .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok())); let pgo_available = cgcx.opts.cg.profile_use.is_some(); - let data = Box::into_raw(Box::new((cgcx, handler))); + let data = Box::into_raw(Box::new((cgcx, dcx))); unsafe { let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx); llvm::LLVMRustContextConfigureDiagnosticHandler( @@ -429,7 +427,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void if user.is_null() { return; } - let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, dcx) = *(user as *const (&CodegenContext, &DiagCtxt)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -437,7 +435,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void } llvm::diagnostic::Optimization(opt) => { - diag_handler.emit_note(FromLlvmOptimizationDiag { + dcx.emit_note(FromLlvmOptimizationDiag { filename: &opt.filename, line: opt.line, column: opt.column, @@ -459,14 +457,14 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s) }) .expect("non-UTF8 diagnostic"); - diag_handler.emit_warning(FromLlvmDiag { message }); + dcx.emit_warning(FromLlvmDiag { message }); } llvm::diagnostic::Unsupported(diagnostic_ref) => { let message = llvm::build_string(|s| { llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s) }) .expect("non-UTF8 diagnostic"); - diag_handler.emit_err(FromLlvmDiag { message }); + dcx.emit_err(FromLlvmDiag { message }); } 
llvm::diagnostic::UnknownDiagnostic(..) => {} } @@ -507,7 +505,7 @@ fn get_instr_profile_output_path(config: &ModuleConfig) -> Option { pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: &ModuleCodegen, config: &ModuleConfig, opt_level: config::OptLevel, @@ -589,13 +587,13 @@ pub(crate) unsafe fn llvm_optimize( llvm_plugins.as_ptr().cast(), llvm_plugins.len(), ); - result.into_result().map_err(|()| llvm_err(diag_handler, LlvmError::RunLlvmPasses)) + result.into_result().map_err(|()| llvm_err(dcx, LlvmError::RunLlvmPasses)) } // Unsafe due to LLVM calls. pub(crate) unsafe fn optimize( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: &ModuleCodegen, config: &ModuleConfig, ) -> Result<(), FatalError> { @@ -603,8 +601,7 @@ pub(crate) unsafe fn optimize( let llmod = module.module_llvm.llmod(); let llcx = &*module.module_llvm.llcx; - let _handlers = - DiagnosticHandlers::new(cgcx, diag_handler, llcx, module, CodegenDiagnosticsStage::Opt); + let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt); let module_name = module.name.clone(); let module_name = Some(&module_name[..]); @@ -622,14 +619,14 @@ pub(crate) unsafe fn optimize( _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO, _ => llvm::OptStage::PreLinkNoLTO, }; - return llvm_optimize(cgcx, diag_handler, module, config, opt_level, opt_stage); + return llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage); } Ok(()) } pub(crate) fn link( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, mut modules: Vec>, ) -> Result, FatalError> { use super::lto::{Linker, ModuleBuffer}; @@ -642,9 +639,9 @@ pub(crate) fn link( for module in elements { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name); let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - linker.add(buffer.data()).map_err(|()| { - llvm_err(diag_handler, LlvmError::SerializeModule { name: &module.name }) - })?; + linker + .add(buffer.data()) + .map_err(|()| llvm_err(dcx, LlvmError::SerializeModule { name: &module.name }))?; } drop(linker); Ok(modules.remove(0)) @@ -652,7 +649,7 @@ pub(crate) fn link( pub(crate) unsafe fn codegen( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: ModuleCodegen, config: &ModuleConfig, ) -> Result { @@ -663,13 +660,8 @@ pub(crate) unsafe fn codegen( let tm = &*module.module_llvm.tm; let module_name = module.name.clone(); let module_name = Some(&module_name[..]); - let _handlers = DiagnosticHandlers::new( - cgcx, - diag_handler, - llcx, - &module, - CodegenDiagnosticsStage::Codegen, - ); + let _handlers = + DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen); if cgcx.msvc_imps_needed { create_msvc_imps(cgcx, llcx, llmod); @@ -728,7 +720,7 @@ pub(crate) unsafe fn codegen( .prof .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name); if let Err(err) = fs::write(&bc_out, data) { - diag_handler.emit_err(WriteBytecode { path: &bc_out, err }); + dcx.emit_err(WriteBytecode { path: &bc_out, err }); } } @@ -778,9 +770,7 @@ pub(crate) unsafe fn codegen( record_artifact_size(&cgcx.prof, "llvm_ir", &out); } - result - .into_result() - .map_err(|()| llvm_err(diag_handler, LlvmError::WriteIr { path: &out }))?; + result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }))?; } if config.emit_asm { @@ -799,7 +789,7 @@ pub(crate) unsafe fn codegen( }; 
with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file( - diag_handler, + dcx, tm, cpm, llmod, @@ -834,7 +824,7 @@ pub(crate) unsafe fn codegen( with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file( - diag_handler, + dcx, tm, cpm, llmod, @@ -849,12 +839,12 @@ pub(crate) unsafe fn codegen( EmitObj::Bitcode => { debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); if let Err(err) = link_or_copy(&bc_out, &obj_out) { - diag_handler.emit_err(CopyBitcode { err }); + dcx.emit_err(CopyBitcode { err }); } if !config.emit_bc { debug!("removing_bitcode {:?}", bc_out); - ensure_removed(diag_handler, &bc_out); + ensure_removed(dcx, &bc_out); } } @@ -918,6 +908,7 @@ fn target_is_aix(cgcx: &CodegenContext) -> bool { cgcx.opts.target_triple.triple().contains("-aix") } +//FIXME use c string literals here too pub(crate) fn bitcode_section_name(cgcx: &CodegenContext) -> &'static str { if target_is_apple(cgcx) { "__LLVM,__bitcode\0" @@ -990,17 +981,13 @@ unsafe fn embed_bitcode( // reason (see issue #90326 for historical background). let is_aix = target_is_aix(cgcx); let is_apple = target_is_apple(cgcx); - if is_apple - || is_aix - || cgcx.opts.target_triple.triple().starts_with("wasm") - || cgcx.opts.target_triple.triple().starts_with("asmjs") - { + if is_apple || is_aix || cgcx.opts.target_triple.triple().starts_with("wasm") { // We don't need custom section flags, create LLVM globals. let llconst = common::bytes_in_context(llcx, bitcode); let llglobal = llvm::LLVMAddGlobal( llmod, common::val_ty(llconst), - "rustc.embedded.module\0".as_ptr().cast(), + c"rustc.embedded.module".as_ptr().cast(), ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -1013,15 +1000,15 @@ unsafe fn embed_bitcode( let llglobal = llvm::LLVMAddGlobal( llmod, common::val_ty(llconst), - "rustc.embedded.cmdline\0".as_ptr().cast(), + c"rustc.embedded.cmdline".as_ptr().cast(), ); llvm::LLVMSetInitializer(llglobal, llconst); let section = if is_apple { - "__LLVM,__cmdline\0" + c"__LLVM,__cmdline" } else if is_aix { - ".info\0" + c".info" } else { - ".llvmcmd\0" + c".llvmcmd" }; llvm::LLVMSetSection(llglobal, section.as_ptr().cast()); llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); @@ -1110,7 +1097,7 @@ fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Modu } let raw_stats = - llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(&llmod, s) }) + llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(llmod, s) }) .expect("cannot get module instruction stats"); #[derive(serde::Deserialize)] diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index b659fd02e..5dc271ccd 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -19,8 +19,6 @@ use crate::context::CodegenCx; use crate::llvm; use crate::value::Value; -use cstr::cstr; - use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; @@ -110,11 +108,11 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen // Create the llvm.used and llvm.compiler.used variables. 
if !cx.used_statics.borrow().is_empty() { - cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow()); + cx.create_used_variable_impl(c"llvm.used", &*cx.used_statics.borrow()); } if !cx.compiler_used_statics.borrow().is_empty() { cx.create_used_variable_impl( - cstr!("llvm.compiler.used"), + c"llvm.compiler.used", &*cx.compiler_used_statics.borrow(), ); } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 7b259055d..8f60175a6 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -7,7 +7,6 @@ use crate::llvm_util; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; -use cstr::cstr; use libc::{c_char, c_uint}; use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; @@ -27,7 +26,6 @@ use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange}; use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target}; use smallvec::SmallVec; use std::borrow::Cow; -use std::ffi::CStr; use std::iter; use std::ops::Deref; use std::ptr; @@ -47,13 +45,10 @@ impl Drop for Builder<'_, '_, '_> { } } -// FIXME(eddyb) use a checked constructor when they become `const fn`. -const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") }; - /// Empty string, to be used where LLVM expects an instruction name, indicating /// that the instruction is to be left unnamed (i.e. numbered, in textual IR). // FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer. -const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr(); +const UNNAMED: *const c_char = c"".as_ptr(); impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> { type Value = as BackendTypes>::Value; @@ -358,7 +353,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let new_kind = match ty.kind() { Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)), - t @ (Uint(_) | Int(_)) => t.clone(), + t @ (Uint(_) | Int(_)) => *t, _ => panic!("tried to get overflow intrinsic for op applied to non-int type"), }; @@ -489,6 +484,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { #[instrument(level = "trace", skip(self))] fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> { + if place.layout.is_unsized() { + let tail = self.tcx.struct_tail_with_normalize(place.layout.ty, |ty| ty, || {}); + if matches!(tail.kind(), ty::Foreign(..)) { + // Unsized locals and, at least conceptually, even unsized arguments must be copied + // around, which requires dynamically determining their size. Therefore, we cannot + // allow `extern` types here. Consult t-opsem before removing this check. 
+ panic!("unsized locals must not be `extern` types"); + } + } assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { @@ -1003,14 +1007,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> { - let name = cstr!("cleanuppad"); let ret = unsafe { llvm::LLVMBuildCleanupPad( self.llbuilder, parent, args.as_ptr(), args.len() as c_uint, - name.as_ptr(), + c"cleanuppad".as_ptr(), ) }; Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) @@ -1024,14 +1027,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> { - let name = cstr!("catchpad"); let ret = unsafe { llvm::LLVMBuildCatchPad( self.llbuilder, parent, args.as_ptr(), args.len() as c_uint, - name.as_ptr(), + c"catchpad".as_ptr(), ) }; Funclet::new(ret.expect("LLVM does not have support for catchpad")) @@ -1043,14 +1045,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unwind: Option<&'ll BasicBlock>, handlers: &[&'ll BasicBlock], ) -> &'ll Value { - let name = cstr!("catchswitch"); let ret = unsafe { llvm::LLVMBuildCatchSwitch( self.llbuilder, parent, unwind, handlers.len() as c_uint, - name.as_ptr(), + c"catchswitch".as_ptr(), ) }; let ret = ret.expect("LLVM does not have support for catchswitch"); diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index d5778757c..e675362ac 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -59,10 +59,10 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> // To avoid this, we set the Storage Class to "DllImport" so that // LLVM will prefix the name with `__imp_`. Ideally, we'd like the // existing logic below to set the Storage Class, but it has an - // exemption for MinGW for backwards compatability. + // exemption for MinGW for backwards compatibility. 
let llfn = cx.declare_fn( &common::i686_decorated_name( - &dllimport, + dllimport, common::is_mingw_gnu_toolchain(&tcx.sess.target), true, ), diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 0b0816c27..8173e41af 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -203,6 +203,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMSetInitializer(g, sc); llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); } (s.to_owned(), g) @@ -245,8 +246,8 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } Scalar::Ptr(ptr, _size) => { - let (alloc_id, offset) = ptr.into_parts(); - let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) { + let (prov, offset) = ptr.into_parts(); + let (base_addr, base_addr_space) = match self.tcx.global_alloc(prov.alloc_id()) { GlobalAlloc::Memory(alloc) => { let init = const_alloc_to_llvm(self, alloc); let alloc = alloc.inner(); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 307c1264d..77e893c81 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -8,7 +8,6 @@ use crate::llvm::{self, True}; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; -use cstr::cstr; use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; @@ -72,7 +71,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< } let mut next_offset = 0; - for &(offset, alloc_id) in alloc.provenance().ptrs().iter() { + for &(offset, prov) in alloc.provenance().ptrs().iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; @@ -92,13 +91,10 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< .expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx); + let address_space = cx.tcx.global_alloc(prov.alloc_id()).address_space(cx); llvals.push(cx.scalar_to_backend( - InterpScalar::from_pointer( - Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), - &cx.tcx, - ), + InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx), Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size), @@ -187,7 +183,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( { cx.declare_global( &common::i686_decorated_name( - &dllimport, + dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true, ), @@ -476,9 +472,9 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { .all(|&byte| byte == 0); let sect_name = if all_bytes_are_zero { - cstr!("__DATA,__thread_bss") + c"__DATA,__thread_bss" } else { - cstr!("__DATA,__thread_data") + c"__DATA,__thread_data" }; llvm::LLVMSetSection(g, sect_name.as_ptr()); } @@ -507,7 +503,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { let val = llvm::LLVMMetadataAsValue(self.llcx, meta); llvm::LLVMAddNamedMetadataOperand( self.llmod, - "wasm.custom_sections\0".as_ptr().cast(), + c"wasm.custom_sections".as_ptr().cast(), val, ); } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs 
index 4dd6372b5..3053c4e0d 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -8,7 +8,6 @@ use crate::llvm_util; use crate::type_::Type; use crate::value::Value; -use cstr::cstr; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; @@ -120,6 +119,7 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic, TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec, TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec, + TlsModel::Emulated => llvm::ThreadLocalMode::GeneralDynamic, } } @@ -134,18 +134,6 @@ pub unsafe fn create_module<'ll>( let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); - if llvm_version < (16, 0, 0) { - if sess.target.arch == "s390x" { - // LLVM 16 data layout changed to always set 64-bit vector alignment, - // which is conditional in earlier LLVM versions. - // https://reviews.llvm.org/D131158 for the discussion. - target_data_layout = target_data_layout.replace("-v128:64", ""); - } else if sess.target.arch == "riscv64" { - // LLVM 16 introduced this change so as to produce more efficient code. - // See https://reviews.llvm.org/D116735 for the discussion. - target_data_layout = target_data_layout.replace("-n32:64-", "-n64-"); - } - } if llvm_version < (17, 0, 0) { if sess.target.arch.starts_with("powerpc") { // LLVM 17 specifies function pointer alignment for ppc: @@ -226,13 +214,13 @@ pub unsafe fn create_module<'ll>( // If skipping the PLT is enabled, we need to add some module metadata // to ensure intrinsic calls don't use it. if !sess.needs_plt() { - let avoid_plt = "RtLibUseGOT\0".as_ptr().cast(); + let avoid_plt = c"RtLibUseGOT".as_ptr().cast(); llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); } // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.) if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() { - let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast(); + let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr().cast(); llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Override, @@ -243,7 +231,7 @@ pub unsafe fn create_module<'ll>( // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.) if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() { - let enable_split_lto_unit = "EnableSplitLTOUnit\0".as_ptr().cast(); + let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr().cast(); llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Override, @@ -254,7 +242,7 @@ pub unsafe fn create_module<'ll>( // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.) 
if sess.is_sanitizer_kcfi_enabled() { - let kcfi = "kcfi\0".as_ptr().cast(); + let kcfi = c"kcfi".as_ptr().cast(); llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1); } @@ -267,7 +255,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Warning, - "cfguard\0".as_ptr() as *const _, + c"cfguard".as_ptr() as *const _, 1, ) } @@ -276,7 +264,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Warning, - "cfguard\0".as_ptr() as *const _, + c"cfguard".as_ptr() as *const _, 2, ) } @@ -294,26 +282,26 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, behavior, - "branch-target-enforcement\0".as_ptr().cast(), + c"branch-target-enforcement".as_ptr().cast(), bti.into(), ); llvm::LLVMRustAddModuleFlag( llmod, behavior, - "sign-return-address\0".as_ptr().cast(), + c"sign-return-address".as_ptr().cast(), pac_ret.is_some().into(), ); let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); llvm::LLVMRustAddModuleFlag( llmod, behavior, - "sign-return-address-all\0".as_ptr().cast(), + c"sign-return-address-all".as_ptr().cast(), pac_opts.leaf.into(), ); llvm::LLVMRustAddModuleFlag( llmod, behavior, - "sign-return-address-with-bkey\0".as_ptr().cast(), + c"sign-return-address-with-bkey".as_ptr().cast(), u32::from(pac_opts.key == PAuthKey::B), ); } else { @@ -329,7 +317,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Override, - "cf-protection-branch\0".as_ptr().cast(), + c"cf-protection-branch".as_ptr().cast(), 1, ) } @@ -337,7 +325,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Override, - "cf-protection-return\0".as_ptr().cast(), + c"cf-protection-return".as_ptr().cast(), 1, ) } @@ -346,11 +334,21 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, llvm::LLVMModFlagBehavior::Error, - "Virtual Function Elim\0".as_ptr().cast(), + c"Virtual Function Elim".as_ptr().cast(), 1, ); } + // Set module flag to enable Windows EHCont Guard (/guard:ehcont). + if sess.opts.unstable_opts.ehcont_guard { + llvm::LLVMRustAddModuleFlag( + llmod, + llvm::LLVMModFlagBehavior::Warning, + c"ehcontguard".as_ptr() as *const _, + 1, + ) + } + // Insert `llvm.ident` metadata. 
// // On the wasm targets it will get hooked up to the "producer" sections @@ -364,10 +362,28 @@ pub unsafe fn create_module<'ll>( ); llvm::LLVMAddNamedMetadataOperand( llmod, - cstr!("llvm.ident").as_ptr(), + c"llvm.ident".as_ptr(), llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1), ); + // Add module flags specified via -Z llvm_module_flag + for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag { + let key = format!("{key}\0"); + let behavior = match behavior.as_str() { + "error" => llvm::LLVMModFlagBehavior::Error, + "warning" => llvm::LLVMModFlagBehavior::Warning, + "require" => llvm::LLVMModFlagBehavior::Require, + "override" => llvm::LLVMModFlagBehavior::Override, + "append" => llvm::LLVMModFlagBehavior::Append, + "appendunique" => llvm::LLVMModFlagBehavior::AppendUnique, + "max" => llvm::LLVMModFlagBehavior::Max, + "min" => llvm::LLVMModFlagBehavior::Min, + // We already checked this during option parsing + _ => unreachable!(), + }; + llvm::LLVMRustAddModuleFlag(llmod, behavior, key.as_ptr().cast(), *value) + } + llmod } @@ -494,14 +510,13 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { - let section = cstr!("llvm.metadata"); let array = self.const_array(self.type_ptr(), values); unsafe { let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); llvm::LLVMSetInitializer(g, array); llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); + llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr()); } } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 274e0aeaa..33bfde03a 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -58,6 +58,11 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) { return; } + // The entries of the map are only used to get a list of all files with + // coverage info. In the end the list of files is passed into + // `GlobalFileTable::new()` which internally do `.sort_unstable_by()`, so + // the iteration order here does not matter. + #[allow(rustc::potential_query_instability)] let function_coverage_entries = function_coverage_map .into_iter() .map(|(instance, function_coverage)| (instance, function_coverage.into_finished())) @@ -176,7 +181,7 @@ impl GlobalFileTable { // compilation directory can be combined with the relative paths // to get absolute paths, if needed. use rustc_session::RemapFileNameExt; - let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(&tcx.sess).to_string_lossy(); + let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); llvm::build_byte_buffer(|buffer| { coverageinfo::write_filenames_section_to_buffer( @@ -189,8 +194,6 @@ impl GlobalFileTable { } rustc_index::newtype_index! { - // Tell the newtype macro to not generate `Encode`/`Decode` impls. - #[custom_encodable] struct LocalFileId {} } @@ -375,10 +378,7 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) { // just "functions", like consts, statics, etc. Filter those out. // If `ignore_unused_generics` was specified, filter out any // generic functions from consideration as well. 
- if !matches!( - kind, - DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Coroutine - ) { + if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) { return None; } if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) { diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 7d6975618..0befbb5a3 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -85,6 +85,14 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { let bx = self; + match coverage.kind { + // Marker statements have no effect during codegen, + // so return early and don't create `func_coverage`. + CoverageKind::SpanMarker => return, + // Match exhaustively to ensure that newly-added kinds are classified correctly. + CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. } => {} + } + let Some(function_coverage_info) = bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref() else { @@ -100,6 +108,9 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { let Coverage { kind } = coverage; match *kind { + CoverageKind::SpanMarker => unreachable!( + "unexpected marker statement {kind:?} should have caused an early return" + ), CoverageKind::CounterIncrement { id } => { func_coverage.mark_counter_id_seen(id); // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 425e935bc..d82b1e1e7 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -30,14 +30,13 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, /// Allocates the global variable responsible for the .debug_gdb_scripts binary /// section. pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Value { - let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0"; - let section_var_name = &c_section_var_name[..c_section_var_name.len() - 1]; + let c_section_var_name = c"__rustc_debug_gdb_scripts_section__"; + let section_var_name = c_section_var_name.to_str().unwrap(); let section_var = unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) }; section_var.unwrap_or_else(|| { - let section_name = b".debug_gdb_scripts\0"; let mut section_contents = Vec::new(); // Add the pretty printers for the standard library first. 
@@ -70,7 +69,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, ' let section_var = cx .define_global(section_var_name, llvm_type) .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name)); - llvm::LLVMSetSection(section_var, section_name.as_ptr().cast()); + llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr().cast()); llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 865bf01c8..883f82caa 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -17,10 +17,10 @@ use crate::debuginfo::utils::FatPtrKind; use crate::llvm; use crate::llvm::debuginfo::{ DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind, + DebugNameTableKind, }; use crate::value::Value; -use cstr::cstr; use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo; use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind; use rustc_codegen_ssa::traits::*; @@ -35,9 +35,10 @@ use rustc_middle::ty::{ use rustc_session::config::{self, DebugInfo, Lto}; use rustc_span::symbol::Symbol; use rustc_span::FileName; -use rustc_span::{self, FileNameDisplayPreference, SourceFile}; +use rustc_span::{FileNameDisplayPreference, SourceFile}; use rustc_symbol_mangling::typeid_for_trait_ref; use rustc_target::abi::{Align, Size}; +use rustc_target::spec::DebuginfoKind; use smallvec::smallvec; use libc::{c_char, c_longlong, c_uint}; @@ -606,7 +607,7 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> if let Ok(rel_path) = abs_path.strip_prefix(working_directory) { ( - working_directory.to_string_lossy().into(), + working_directory.to_string_lossy(), rel_path.to_string_lossy().into_owned(), ) } else { @@ -853,8 +854,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( use rustc_session::RemapFileNameExt; let name_in_debuginfo = name_in_debuginfo.to_string_lossy(); - let work_dir = tcx.sess.opts.working_dir.for_codegen(&tcx.sess).to_string_lossy(); - let flags = "\0"; + let work_dir = tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); let output_filenames = tcx.output_filenames(()); let split_name = if tcx.sess.target_can_use_split_dwarf() { output_filenames @@ -878,6 +878,17 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( let split_name = split_name.to_str().unwrap(); let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo); + let dwarf_version = + tcx.sess.opts.unstable_opts.dwarf_version.unwrap_or(tcx.sess.target.default_dwarf_version); + let is_dwarf_kind = + matches!(tcx.sess.target.debuginfo_kind, DebuginfoKind::Dwarf | DebuginfoKind::DwarfDsym); + // Don't emit `.debug_pubnames` and `.debug_pubtypes` on DWARFv4 or lower. + let debug_name_table_kind = if is_dwarf_kind && dwarf_version <= 4 { + DebugNameTableKind::None + } else { + DebugNameTableKind::Default + }; + unsafe { let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile( debug_context.builder, @@ -897,7 +908,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( producer.as_ptr().cast(), producer.len(), tcx.sess.opts.optimize != config::OptLevel::No, - flags.as_ptr().cast(), + c"".as_ptr().cast(), 0, // NB: this doesn't actually have any perceptible effect, it seems. 
LLVM will instead // put the path supplied to `MCSplitDwarfFile` into the debug info of the final @@ -907,6 +918,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( kind, 0, tcx.sess.opts.unstable_opts.split_dwarf_inlining, + debug_name_table_kind, ); if tcx.sess.opts.unstable_opts.profile { @@ -926,8 +938,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( ); let val = llvm::LLVMMetadataAsValue(debug_context.llcontext, gcov_metadata); - let llvm_gcov_ident = cstr!("llvm.gcov"); - llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, llvm_gcov_ident.as_ptr(), val); + llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, c"llvm.gcov".as_ptr(), val); } return unit_metadata; @@ -966,6 +977,27 @@ fn build_field_di_node<'ll, 'tcx>( } } +/// Returns the `DIFlags` corresponding to the visibility of the item identified by `did`. +/// +/// `DIFlags::Flag{Public,Protected,Private}` correspond to `DW_AT_accessibility` +/// (public/protected/private) aren't exactly right for Rust, but neither is `DW_AT_visibility` +/// (local/exported/qualified), and there's no way to set `DW_AT_visibility` in LLVM's API. +fn visibility_di_flags<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + did: DefId, + type_did: DefId, +) -> DIFlags { + let parent_did = cx.tcx.parent(type_did); + let visibility = cx.tcx.visibility(did); + match visibility { + Visibility::Public => DIFlags::FlagPublic, + // Private fields have a restricted visibility of the module containing the type. + Visibility::Restricted(did) if did == parent_did => DIFlags::FlagPrivate, + // `pub(crate)`/`pub(super)` visibilities are any other restricted visibility. + Visibility::Restricted(..) => DIFlags::FlagProtected, + } +} + /// Creates the debuginfo node for a Rust struct type. Maybe be a regular struct or a tuple-struct. 
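
The `visibility_di_flags` helper introduced above makes a three-way choice. The sketch below restates that mapping with simplified stand-in types (a `u32` module id instead of rustc's `DefId`, a local enum instead of `DIFlags`) so the rule can be read and tested in isolation; it is illustrative, not rustc's implementation.

    #[derive(Debug, PartialEq)]
    enum DiFlag {
        Public,    // DIFlags::FlagPublic
        Protected, // DIFlags::FlagProtected
        Private,   // DIFlags::FlagPrivate
    }

    enum Visibility {
        Public,
        Restricted(u32), // restricted to the module with this id
    }

    fn di_flag(vis: Visibility, parent_module: u32) -> DiFlag {
        match vis {
            Visibility::Public => DiFlag::Public,
            // Restricted exactly to the type's parent module: an ordinary private field.
            Visibility::Restricted(m) if m == parent_module => DiFlag::Private,
            // Any other restriction (pub(crate), pub(super), ...) maps to "protected".
            Visibility::Restricted(_) => DiFlag::Protected,
        }
    }

    fn main() {
        assert_eq!(di_flag(Visibility::Public, 7), DiFlag::Public);
        assert_eq!(di_flag(Visibility::Restricted(7), 7), DiFlag::Private);
        assert_eq!(di_flag(Visibility::Restricted(0), 7), DiFlag::Protected);
    }
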
fn build_struct_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, @@ -989,7 +1021,7 @@ fn build_struct_type_di_node<'ll, 'tcx>( &compute_debuginfo_type_name(cx.tcx, struct_type, false), size_and_align_of(struct_type_and_layout), Some(containing_scope), - DIFlags::FlagZero, + visibility_di_flags(cx, adt_def.did(), adt_def.did()), ), // Fields: |cx, owner| { @@ -1012,7 +1044,7 @@ fn build_struct_type_di_node<'ll, 'tcx>( &field_name[..], (field_layout.size, field_layout.align.abi), struct_type_and_layout.fields.offset(i), - DIFlags::FlagZero, + visibility_di_flags(cx, f.did, adt_def.did()), type_di_node(cx, field_layout.ty), ) }) diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index ca7bfbeac..4a2861af4 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -26,8 +26,8 @@ use crate::{ enums::{tag_base_type, DiscrResult}, file_metadata, size_and_align_of, type_di_node, type_map::{self, Stub, UniqueTypeId}, - unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA, - UNKNOWN_LINE_NUMBER, + unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec, + NO_GENERICS, NO_SCOPE_METADATA, UNKNOWN_LINE_NUMBER, }, utils::DIB, }, @@ -215,7 +215,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( &enum_type_name, cx.size_and_align_of(enum_type), NO_SCOPE_METADATA, - DIFlags::FlagZero, + visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()), ), |cx, enum_type_di_node| { match enum_type_and_layout.variants { @@ -320,6 +320,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>( variant_index: VariantIdx, ) -> SmallVec<&'ll DIType> { let variant_layout = enum_type_and_layout.for_variant(cx, variant_index); + let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()); let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node( cx, enum_type_and_layout, @@ -327,6 +328,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>( variant_index, enum_adt_def.variant(variant_index), variant_layout, + visibility_flags, ); let tag_base_type = cx.tcx.types.u32; @@ -364,7 +366,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>( // since the later is sometimes smaller (if it has fewer fields). 
size_and_align_of(enum_type_and_layout), Size::ZERO, - DIFlags::FlagZero, + visibility_flags, variant_struct_type_wrapper_di_node, ), unsafe { @@ -376,7 +378,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, variant_names_type_di_node, - DIFlags::FlagZero, + visibility_flags, Some(cx.const_u64(SINGLE_VARIANT_VIRTUAL_DISR)), tag_base_type_align.bits() as u32, ) @@ -403,6 +405,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>( (variant_index, variant_name) }), ); + let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()); let variant_field_infos: SmallVec> = variant_indices .map(|variant_index| { @@ -417,6 +420,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>( variant_index, variant_def, variant_layout, + visibility_flags, ); VariantFieldInfo { @@ -437,6 +441,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>( tag_base_type, tag_field, untagged_variant_index, + visibility_flags, ) } @@ -715,7 +720,7 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( coroutine_type_and_layout, coroutine_type_di_node, coroutine_layout, - &common_upvar_names, + common_upvar_names, ); let span = coroutine_layout.variant_source_info[variant_index].span; @@ -744,6 +749,7 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( tag_base_type, tag_field, None, + DIFlags::FlagZero, ) } @@ -758,6 +764,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( tag_base_type: Ty<'tcx>, tag_field: usize, untagged_variant_index: Option, + di_flags: DIFlags, ) -> SmallVec<&'ll DIType> { let tag_base_type_di_node = type_di_node(cx, tag_base_type); let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1); @@ -801,7 +808,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( align.bits() as u32, // Union fields are always at offset zero Size::ZERO.bits(), - DIFlags::FlagZero, + di_flags, variant_struct_type_wrapper, ) } @@ -835,7 +842,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( TAG_FIELD_NAME_128_LO, size_and_align, lo_offset, - DIFlags::FlagZero, + di_flags, type_di_node, )); @@ -855,7 +862,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( TAG_FIELD_NAME, cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty), enum_type_and_layout.fields.offset(tag_field), - DIFlags::FlagZero, + di_flags, tag_base_type_di_node, )); } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index df1df6d19..eef8dbb33 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -250,6 +250,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( variant_index: VariantIdx, variant_def: &VariantDef, variant_layout: TyAndLayout<'tcx>, + di_flags: DIFlags, ) -> &'ll DIType { debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty); @@ -267,7 +268,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( // NOTE: We use size and align of enum_type, not from variant_layout: size_and_align_of(enum_type_and_layout), Some(enum_type_di_node), - DIFlags::FlagZero, + di_flags, ), |cx, struct_type_di_node| { (0..variant_layout.fields.count()) @@ -289,7 +290,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( &field_name, (field_layout.size, field_layout.align.abi), variant_layout.fields.offset(field_index), - DIFlags::FlagZero, + di_flags, type_di_node(cx, 
field_layout.ty), ) }) @@ -395,7 +396,7 @@ pub fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( }) .collect(); - state_specific_fields.into_iter().chain(common_fields.into_iter()).collect() + state_specific_fields.into_iter().chain(common_fields).collect() }, |cx| build_generic_type_param_di_nodes(cx, coroutine_type_and_layout.ty), ) diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 7eff52b85..cba4e3811 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -7,8 +7,8 @@ use crate::{ enums::tag_base_type, file_metadata, size_and_align_of, type_di_node, type_map::{self, Stub, StubInfo, UniqueTypeId}, - unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, - UNKNOWN_LINE_NUMBER, + unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec, + NO_GENERICS, UNKNOWN_LINE_NUMBER, }, utils::{create_DIArray, get_namespace_for_item, DIB}, }, @@ -63,6 +63,8 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( let enum_type_and_layout = cx.layout_of(enum_type); let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false); + let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()); + debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout)); type_map::build_type_with_children( @@ -74,7 +76,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( &enum_type_name, size_and_align_of(enum_type_and_layout), Some(containing_scope), - DIFlags::FlagZero, + visibility_flags, ), |cx, enum_type_di_node| { // Build the struct type for each variant. These will be referenced by the @@ -92,6 +94,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( variant_index, enum_adt_def.variant(variant_index), enum_type_and_layout.for_variant(cx, variant_index), + visibility_flags, ), source_info: None, }) @@ -197,7 +200,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( coroutine_type_and_layout, coroutine_type_di_node, coroutine_layout, - &common_upvar_names, + common_upvar_names, ), source_info, } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 4832b147a..31631e8a8 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -32,7 +32,7 @@ use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt}; use rustc_session::config::{self, DebugInfo}; use rustc_session::Session; use rustc_span::symbol::Symbol; -use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span}; +use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span}; use rustc_target::abi::Size; use libc::c_uint; @@ -112,7 +112,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { llvm::LLVMRustAddModuleFlag( self.llmod, llvm::LLVMModFlagBehavior::Warning, - "Dwarf Version\0".as_ptr().cast(), + c"Dwarf Version".as_ptr().cast(), dwarf_version, ); } else { @@ -120,17 +120,16 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { llvm::LLVMRustAddModuleFlag( self.llmod, llvm::LLVMModFlagBehavior::Warning, - "CodeView\0".as_ptr().cast(), + c"CodeView".as_ptr().cast(), 1, ) } // Prevent bitcode readers from deleting the debug info. 
- let ptr = "Debug Info Version\0".as_ptr(); llvm::LLVMRustAddModuleFlag( self.llmod, llvm::LLVMModFlagBehavior::Warning, - ptr.cast(), + c"Debug Info Version".as_ptr().cast(), llvm::LLVMRustDebugMetadataVersion(), ); } diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 164b12cf8..78c0725a6 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -84,7 +84,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { fn_type: &'ll Type, ) -> &'ll Value { // Declare C ABI functions with the visibility used by C by default. - let visibility = if self.tcx.sess.target.default_hidden_visibility { + let visibility = if self.tcx.sess.default_hidden_visibility() { llvm::Visibility::Hidden } else { llvm::Visibility::Default @@ -107,7 +107,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { unnamed: llvm::UnnamedAddr, fn_type: &'ll Type, ) -> &'ll Value { - let visibility = if self.tcx.sess.target.default_hidden_visibility { + let visibility = if self.tcx.sess.default_hidden_visibility() { llvm::Visibility::Hidden } else { llvm::Visibility::Default diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 10ca5ad80..671a22525 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -5,7 +5,7 @@ use std::path::Path; use crate::fluent_generated as fluent; use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{ - DiagnosticBuilder, EmissionGuarantee, ErrorGuaranteed, Handler, IntoDiagnostic, + DiagCtxt, DiagnosticBuilder, EmissionGuarantee, ErrorGuaranteed, FatalError, IntoDiagnostic, }; use rustc_macros::{Diagnostic, Subdiagnostic}; use rustc_span::Span; @@ -101,13 +101,13 @@ pub(crate) struct DynamicLinkingWithLTO; pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>); -impl IntoDiagnostic<'_, EM> for ParseTargetMachineConfig<'_> { - fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> { - let diag: DiagnosticBuilder<'_, EM> = self.0.into_diagnostic(sess); +impl IntoDiagnostic<'_, FatalError> for ParseTargetMachineConfig<'_> { + fn into_diagnostic(self, dcx: &'_ DiagCtxt) -> DiagnosticBuilder<'_, FatalError> { + let diag: DiagnosticBuilder<'_, FatalError> = self.0.into_diagnostic(dcx); let (message, _) = diag.styled_message().first().expect("`LlvmError` with no message"); - let message = sess.eagerly_translate_to_string(message.clone(), diag.args()); + let message = dcx.eagerly_translate_to_string(message.clone(), diag.args()); - let mut diag = sess.struct_diagnostic(fluent::codegen_llvm_parse_target_machine_config); + let mut diag = dcx.struct_almost_fatal(fluent::codegen_llvm_parse_target_machine_config); diag.set_arg("error", message); diag } @@ -124,8 +124,8 @@ pub(crate) struct TargetFeatureDisableOrEnable<'a> { pub(crate) struct MissingFeatures; impl IntoDiagnostic<'_, ErrorGuaranteed> for TargetFeatureDisableOrEnable<'_> { - fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, ErrorGuaranteed> { - let mut diag = sess.struct_err(fluent::codegen_llvm_target_feature_disable_or_enable); + fn into_diagnostic(self, dcx: &'_ DiagCtxt) -> DiagnosticBuilder<'_, ErrorGuaranteed> { + let mut diag = dcx.struct_err(fluent::codegen_llvm_target_feature_disable_or_enable); if let Some(span) = self.span { diag.set_span(span); }; @@ -183,8 +183,8 @@ pub enum LlvmError<'a> { pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String); -impl IntoDiagnostic<'_, EM> for 
WithLlvmError<'_> { - fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, EM> { +impl IntoDiagnostic<'_, G> for WithLlvmError<'_> { + fn into_diagnostic(self, dcx: &'_ DiagCtxt) -> DiagnosticBuilder<'_, G> { use LlvmError::*; let msg_with_llvm_err = match &self.0 { WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err, @@ -201,7 +201,7 @@ impl IntoDiagnostic<'_, EM> for WithLlvmError<'_> { PrepareThinLtoModule => fluent::codegen_llvm_prepare_thin_lto_module_with_llvm_err, ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err, }; - let mut diag = self.0.into_diagnostic(sess); + let mut diag = self.0.into_diagnostic(dcx); diag.set_primary_message(msg_with_llvm_err); diag.set_arg("llvm_err", self.1); diag diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index cc7e78b9c..58e68a649 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -10,7 +10,7 @@ use crate::value::Value; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization}; -use rustc_codegen_ssa::mir::operand::OperandRef; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_hir as hir; @@ -946,6 +946,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>( tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx)); let arg_tys = sig.inputs(); + // Vectors must be immediates (non-power-of-2 #[repr(packed)] are not) + for (ty, arg) in arg_tys.iter().zip(args) { + if ty.is_simd() && !matches!(arg.val, OperandValue::Immediate(_)) { + return_error!(InvalidMonomorphization::SimdArgument { span, name, ty: *ty }); + } + } + if name == sym::simd_select_bitmask { let (len, _) = require_simd!(arg_tys[1], SimdArgument); @@ -1492,6 +1499,198 @@ fn generic_simd_intrinsic<'ll, 'tcx>( return Ok(v); } + if name == sym::simd_masked_load { + // simd_masked_load(mask: , pointer: *_ T, values: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + // Loads contiguous elements from memory behind `pointer`, but only for + // those lanes whose `mask` bit is enabled. + // The memory addresses corresponding to the “off” lanes are not accessed. 
+ + // The element type of the "mask" argument must be a signed integer type of any width + let mask_ty = in_ty; + let (mask_len, mask_elem) = (in_len, in_elem); + + // The second argument must be a pointer matching the element type + let pointer_ty = arg_tys[1]; + + // The last argument is a passthrough vector providing values for disabled lanes + let values_ty = arg_tys[2]; + let (values_len, values_elem) = require_simd!(values_ty, SimdThird); + + require_simd!(ret_ty, SimdReturn); + + // Of the same length: + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); + + // The return type must match the last argument type + require!( + ret_ty == values_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty } + ); + + require!( + matches!( + pointer_ty.kind(), + ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: values_elem, + second_arg: pointer_ty, + in_elem: values_elem, + in_ty: values_ty, + mutability: ExpectedPointerMutability::Not, + } + ); + + require!( + matches!(mask_elem.kind(), ty::Int(_)), + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: values_elem, + third_arg: mask_ty, + } + ); + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, mask_len); + (bx.trunc(args[0].immediate(), i1xn), i1xn) + }; + + let llvm_pointer = bx.type_ptr(); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len); + let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len); + + let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0"); + let fn_ty = bx + .type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = bx.call( + fn_ty, + None, + None, + f, + &[args[1].immediate(), alignment, mask, args[2].immediate()], + None, + ); + return Ok(v); + } + + if name == sym::simd_masked_store { + // simd_masked_store(mask: , pointer: *mut T, values: ) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + // Stores contiguous elements to memory behind `pointer`, but only for + // those lanes whose `mask` bit is enabled. + // The memory addresses corresponding to the “off” lanes are not accessed. 
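
Both of the intrinsics handled here lower to the `llvm.masked.load`/`llvm.masked.store` calls built further down. As a reading aid, the per-lane semantics can be modelled in plain, safe Rust (a scalar loop over `i32` slices with hypothetical function names; the real intrinsics take a raw pointer and genuinely never touch masked-off addresses, which a slice-based model cannot express):

    // Behavioural model only: enabled lanes move data, disabled lanes either keep
    // the passthrough value (load) or leave memory untouched (store).
    fn masked_load<const N: usize>(mask: [bool; N], memory: &[i32], passthrough: [i32; N]) -> [i32; N] {
        let mut out = passthrough;
        for i in 0..N {
            if mask[i] {
                out[i] = memory[i];
            }
        }
        out
    }

    fn masked_store<const N: usize>(mask: [bool; N], memory: &mut [i32], values: [i32; N]) {
        for i in 0..N {
            if mask[i] {
                memory[i] = values[i];
            }
        }
    }

    fn main() {
        let loaded = masked_load([true, false, true, false], &[1, 2, 3, 4], [0; 4]);
        assert_eq!(loaded, [1, 0, 3, 0]);

        let mut out = [0; 4];
        masked_store([false, true, false, true], &mut out, [9, 9, 9, 9]);
        assert_eq!(out, [0, 9, 0, 9]);
    }
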
+ + // The element type of the "mask" argument must be a signed integer type of any width + let mask_ty = in_ty; + let (mask_len, mask_elem) = (in_len, in_elem); + + // The second argument must be a pointer matching the element type + let pointer_ty = arg_tys[1]; + + // The last argument specifies the values to store to memory + let values_ty = arg_tys[2]; + let (values_len, values_elem) = require_simd!(values_ty, SimdThird); + + // Of the same length: + require!( + values_len == mask_len, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len: mask_len, + in_ty: mask_ty, + arg_ty: values_ty, + out_len: values_len + } + ); + + // The second argument must be a mutable pointer type matching the element type + require!( + matches!( + pointer_ty.kind(), + ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() && p.mutbl.is_mut() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: values_elem, + second_arg: pointer_ty, + in_elem: values_elem, + in_ty: values_ty, + mutability: ExpectedPointerMutability::Mut, + } + ); + + require!( + matches!(mask_elem.kind(), ty::Int(_)), + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: values_elem, + third_arg: mask_ty, + } + ); + + // Alignment of T, must be a constant integer value: + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len); + (bx.trunc(args[0].immediate(), i1xn), i1xn) + }; + + let ret_t = bx.type_void(); + + let llvm_pointer = bx.type_ptr(); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len); + let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len); + + let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0"); + let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = bx.call( + fn_ty, + None, + None, + f, + &[args[2].immediate(), args[1].immediate(), alignment, mask], + None, + ); + return Ok(v); + } + if name == sym::simd_scatter { // simd_scatter(values: , pointers: , // mask: ) -> () diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index a4e027012..ed0d853e7 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -4,10 +4,11 @@ //! //! This API is completely unstable and subject to change. 
-#![cfg_attr(not(bootstrap), allow(internal_features))] -#![cfg_attr(not(bootstrap), feature(rustdoc_internals))] -#![cfg_attr(not(bootstrap), doc(rust_logo))] +#![allow(internal_features)] +#![feature(rustdoc_internals)] +#![doc(rust_logo)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] +#![feature(c_str_literals)] #![feature(exact_size_is_empty)] #![feature(extern_types)] #![feature(hash_raw_entry)] @@ -17,7 +18,6 @@ #![feature(never_type)] #![feature(impl_trait_in_assoc_type)] #![recursion_limit = "256"] -#![allow(rustc::potential_query_instability)] #![deny(rustc::untranslatable_diagnostic)] #![deny(rustc::diagnostic_outside_of_impl)] @@ -40,8 +40,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::{CodegenResults, CompiledModule}; use rustc_data_structures::fx::FxIndexMap; -use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage}; -use rustc_fluent_macro::fluent_messages; +use rustc_errors::{DiagCtxt, ErrorGuaranteed, FatalError}; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; @@ -93,7 +92,7 @@ mod type_of; mod va_arg; mod value; -fluent_messages! { "../messages.ftl" } +rustc_fluent_macro::fluent_messages! { "../messages.ftl" } #[derive(Clone)] pub struct LlvmCodegenBackend(()); @@ -105,7 +104,7 @@ struct TimeTraceProfiler { impl TimeTraceProfiler { fn new(enabled: bool) -> Self { if enabled { - unsafe { llvm::LLVMTimeTraceProfilerInitialize() } + unsafe { llvm::LLVMRustTimeTraceProfilerInitialize() } } TimeTraceProfiler { enabled } } @@ -114,7 +113,7 @@ impl TimeTraceProfiler { impl Drop for TimeTraceProfiler { fn drop(&mut self) { if self.enabled { - unsafe { llvm::LLVMTimeTraceProfilerFinishThread() } + unsafe { llvm::LLVMRustTimeTraceProfilerFinishThread() } } } } @@ -201,10 +200,10 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn run_link( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, modules: Vec>, ) -> Result, FatalError> { - back::write::link(cgcx, diag_handler, modules) + back::write::link(cgcx, dcx, modules) } fn run_fat_lto( cgcx: &CodegenContext, @@ -222,18 +221,18 @@ impl WriteBackendMethods for LlvmCodegenBackend { } unsafe fn optimize( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: &ModuleCodegen, config: &ModuleConfig, ) -> Result<(), FatalError> { - back::write::optimize(cgcx, diag_handler, module, config) + back::write::optimize(cgcx, dcx, module, config) } fn optimize_fat( cgcx: &CodegenContext, module: &mut ModuleCodegen, ) -> Result<(), FatalError> { - let diag_handler = cgcx.create_diag_handler(); - back::lto::run_pass_manager(cgcx, &diag_handler, module, false) + let dcx = cgcx.create_dcx(); + back::lto::run_pass_manager(cgcx, &dcx, module, false) } unsafe fn optimize_thin( cgcx: &CodegenContext, @@ -243,11 +242,11 @@ impl WriteBackendMethods for LlvmCodegenBackend { } unsafe fn codegen( cgcx: &CodegenContext, - diag_handler: &Handler, + dcx: &DiagCtxt, module: ModuleCodegen, config: &ModuleConfig, ) -> Result { - back::write::codegen(cgcx, diag_handler, module, config) + back::write::codegen(cgcx, dcx, module, config) } fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) { back::lto::prepare_thin(module) @@ -307,7 +306,9 @@ impl CodegenBackend for LlvmCodegenBackend { } PrintKind::TlsModels => { writeln!(out, "Available TLS models:"); - for name in &["global-dynamic", "local-dynamic", 
"initial-exec", "local-exec"] { + for name in + &["global-dynamic", "local-dynamic", "initial-exec", "local-exec", "emulated"] + { writeln!(out, " {name}"); } writeln!(out); @@ -446,16 +447,16 @@ impl ModuleLlvm { cgcx: &CodegenContext, name: &CStr, buffer: &[u8], - handler: &Handler, + dcx: &DiagCtxt, ) -> Result { unsafe { let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?; + let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?; let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap()); let tm = match (cgcx.tm_factory)(tm_factory_config) { Ok(m) => m, Err(e) => { - return Err(handler.emit_almost_fatal(ParseTargetMachineConfig(e))); + return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e))); } }; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 7fc02a95b..f936ba799 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -5,7 +5,7 @@ use super::debuginfo::{ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator, DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace, DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable, - DebugEmissionKind, + DebugEmissionKind, DebugNameTableKind, }; use libc::{c_char, c_int, c_uint, size_t}; @@ -200,6 +200,7 @@ pub enum AttributeKind { AllocatedPointer = 38, AllocAlign = 39, SanitizeSafeStack = 40, + FnRetThunkExtern = 41, } /// LLVMIntPredicate @@ -793,6 +794,15 @@ pub mod debuginfo { } } } + + /// LLVMRustDebugNameTableKind + #[derive(Clone, Copy)] + #[repr(C)] + pub enum DebugNameTableKind { + Default, + Gnu, + None, + } } use bitflags::bitflags; @@ -823,11 +833,7 @@ pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) - pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void; extern "C" { - pub fn LLVMRustInstallFatalErrorHandler(); - pub fn LLVMRustDisableSystemDialogsOnCrash(); - // Create and destroy contexts. - pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context; pub fn LLVMContextDispose(C: &'static mut Context); pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint; @@ -843,9 +849,6 @@ extern "C" { /// See Module::setModuleInlineAsm. pub fn LLVMAppendModuleInlineAsm(M: &Module, Asm: *const c_char, Len: size_t); - /// See llvm::LLVMTypeKind::getTypeID. 
- pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind; - // Operations on integer types pub fn LLVMInt1TypeInContext(C: &Context) -> &Type; pub fn LLVMInt8TypeInContext(C: &Context) -> &Type; @@ -879,7 +882,6 @@ extern "C" { ) -> &'a Type; // Operations on array, pointer, and vector types (sequence types) - pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type; pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; @@ -898,10 +900,7 @@ extern "C" { pub fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value); pub fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value); pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); - pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; - pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>; - pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool; // Operations on constants of any type pub fn LLVMConstNull(Ty: &Type) -> &Value; @@ -931,13 +930,6 @@ extern "C" { pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value; pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value; pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value; - pub fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool; - pub fn LLVMRustConstInt128Get( - ConstantVal: &ConstantInt, - SExt: bool, - high: &mut u64, - low: &mut u64, - ) -> bool; // Operations on composite constants pub fn LLVMConstStringInContext( @@ -972,17 +964,11 @@ extern "C" { pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; - pub fn LLVMConstPointerCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub fn LLVMGetAggregateElement(ConstantVal: &Value, Idx: c_uint) -> Option<&Value>; // Operations on global variables, functions, and aliases (globals) pub fn LLVMIsDeclaration(Global: &Value) -> Bool; - pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage; - pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage); pub fn LLVMSetSection(Global: &Value, Section: *const c_char); - pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility; - pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility); - pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool); pub fn LLVMGetAlignment(Global: &Value) -> c_uint; pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint); pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass); @@ -991,13 +977,6 @@ extern "C" { pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; pub fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; - pub fn LLVMRustGetOrInsertGlobal<'a>( - M: &'a Module, - Name: *const c_char, - NameLen: size_t, - T: &'a Type, - ) -> &'a Value; - pub fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value; pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; pub fn LLVMDeleteGlobal(GlobalVar: &Value); @@ -1007,16 +986,9 @@ extern "C" { pub fn 
LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); - pub fn LLVMRustGetNamedValue( - M: &Module, - Name: *const c_char, - NameLen: size_t, - ) -> Option<&Value>; pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); - pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind); // Operations on attributes - pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute; pub fn LLVMCreateStringAttribute( C: &Context, Name: *const c_char, @@ -1024,31 +996,9 @@ extern "C" { Value: *const c_char, ValueLen: c_uint, ) -> &Attribute; - pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute; - pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute; - pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute; - pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; - pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; - pub fn LLVMRustCreateElementTypeAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; - pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute; - pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute; - pub fn LLVMRustCreateAllocKindAttr(C: &Context, size_arg: u64) -> &Attribute; - pub fn LLVMRustCreateMemoryEffectsAttr(C: &Context, effects: MemoryEffects) -> &Attribute; // Operations on functions - pub fn LLVMRustGetOrInsertFunction<'a>( - M: &'a Module, - Name: *const c_char, - NameLen: size_t, - FunctionTy: &'a Type, - ) -> &'a Value; pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint); - pub fn LLVMRustAddFunctionAttributes<'a>( - Fn: &'a Value, - index: c_uint, - Attrs: *const &'a Attribute, - AttrsLen: size_t, - ); // Operations on parameters pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>; @@ -1069,12 +1019,6 @@ extern "C" { // Operations on call sites pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint); - pub fn LLVMRustAddCallSiteAttributes<'a>( - Instr: &'a Value, - index: c_uint, - Attrs: *const &'a Attribute, - AttrsLen: size_t, - ); // Operations on load/store instructions (only) pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool); @@ -1112,18 +1056,6 @@ extern "C" { Else: &'a BasicBlock, NumCases: c_uint, ) -> &'a Value; - pub fn LLVMRustBuildInvoke<'a>( - B: &Builder<'a>, - Ty: &'a Type, - Fn: &'a Value, - Args: *const &'a Value, - NumArgs: c_uint, - Then: &'a BasicBlock, - Catch: &'a BasicBlock, - OpBundles: *const &OperandBundleDef<'a>, - NumOpBundles: c_uint, - Name: *const c_char, - ) -> &'a Value; pub fn LLVMBuildLandingPad<'a>( B: &Builder<'a>, Ty: &'a Type, @@ -1337,7 +1269,6 @@ extern "C" { pub fn LLVMBuildNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; pub fn LLVMBuildFNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; pub fn LLVMBuildNot<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; - pub fn LLVMRustSetFastMath(Instr: &Value); // Memory pub fn LLVMBuildAlloca<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value; @@ -1485,42 +1416,6 @@ extern "C" { // Miscellaneous instructions pub fn LLVMBuildPhi<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value; - pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value; - pub fn LLVMRustBuildCall<'a>( - B: &Builder<'a>, - Ty: &'a 
Type, - Fn: &'a Value, - Args: *const &'a Value, - NumArgs: c_uint, - OpBundles: *const &OperandBundleDef<'a>, - NumOpBundles: c_uint, - ) -> &'a Value; - pub fn LLVMRustBuildMemCpy<'a>( - B: &Builder<'a>, - Dst: &'a Value, - DstAlign: c_uint, - Src: &'a Value, - SrcAlign: c_uint, - Size: &'a Value, - IsVolatile: bool, - ) -> &'a Value; - pub fn LLVMRustBuildMemMove<'a>( - B: &Builder<'a>, - Dst: &'a Value, - DstAlign: c_uint, - Src: &'a Value, - SrcAlign: c_uint, - Size: &'a Value, - IsVolatile: bool, - ) -> &'a Value; - pub fn LLVMRustBuildMemSet<'a>( - B: &Builder<'a>, - Dst: &'a Value, - DstAlign: c_uint, - Val: &'a Value, - Size: &'a Value, - IsVolatile: bool, - ) -> &'a Value; pub fn LLVMBuildSelect<'a>( B: &Builder<'a>, If: &'a Value, @@ -1568,6 +1463,202 @@ extern "C" { Name: *const c_char, ) -> &'a Value; + // Atomic Operations + pub fn LLVMBuildAtomicCmpXchg<'a>( + B: &Builder<'a>, + LHS: &'a Value, + CMP: &'a Value, + RHS: &'a Value, + Order: AtomicOrdering, + FailureOrder: AtomicOrdering, + SingleThreaded: Bool, + ) -> &'a Value; + + pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool); + + pub fn LLVMBuildAtomicRMW<'a>( + B: &Builder<'a>, + Op: AtomicRmwBinOp, + LHS: &'a Value, + RHS: &'a Value, + Order: AtomicOrdering, + SingleThreaded: Bool, + ) -> &'a Value; + + pub fn LLVMBuildFence<'a>( + B: &Builder<'a>, + Order: AtomicOrdering, + SingleThreaded: Bool, + Name: *const c_char, + ) -> &'a Value; + + /// Writes a module to the specified path. Returns 0 on success. + pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int; + + /// Creates a legacy pass manager -- only used for final codegen. + pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>; + + pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>); + + pub fn LLVMGetHostCPUFeatures() -> *mut c_char; + + pub fn LLVMDisposeMessage(message: *mut c_char); + + pub fn LLVMIsMultithreaded() -> Bool; + + pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type; + + pub fn LLVMStructSetBody<'a>( + StructTy: &'a Type, + ElementTypes: *const &'a Type, + ElementCount: c_uint, + Packed: Bool, + ); + + pub fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; + + pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); + + pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; +} + +#[link(name = "llvm-wrapper", kind = "static")] +extern "C" { + pub fn LLVMRustInstallFatalErrorHandler(); + pub fn LLVMRustDisableSystemDialogsOnCrash(); + + // Create and destroy contexts. + pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context; + + /// See llvm::LLVMTypeKind::getTypeID. 
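
The block opened just above is the structural change in this file: every `LLVMRust*` shim implemented in rustc's own C++ wrapper moves out of the generic `extern "C"` block and into one carrying an explicit `#[link]` attribute for the static `llvm-wrapper` archive, while functions coming straight from the LLVM C API stay behind. A minimal sketch of that split, with placeholder names and a hypothetical cargo feature gate so the example builds even when no such archive exists:

    // Placeholder names, not the real bindings. Declarations that resolve from the
    // main native library need no #[link] attribute of their own here.
    #[allow(non_snake_case, dead_code)]
    extern "C" {
        fn ExampleUpstreamCall(x: i32) -> i32;
    }

    // Functions provided by the project's own static shim are grouped under an
    // explicit #[link] attribute so the linker knows which archive resolves them.
    // Gated behind a hypothetical feature so this sketch links without the archive.
    #[cfg(feature = "native-shim")]
    #[allow(non_snake_case, dead_code)]
    #[link(name = "example-wrapper", kind = "static")]
    extern "C" {
        fn ExampleShimCall(x: i32) -> i32;
    }

    fn main() {}
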
+ pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind; + + // Operations on array, pointer, and vector types (sequence types) + pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; + + // Operations on all values + pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); + pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool; + + // Operations on scalar constants + pub fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool; + pub fn LLVMRustConstInt128Get( + ConstantVal: &ConstantInt, + SExt: bool, + high: &mut u64, + low: &mut u64, + ) -> bool; + + // Operations on global variables, functions, and aliases (globals) + pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage; + pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage); + pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility; + pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility); + pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool); + + // Operations on global variables + pub fn LLVMRustGetOrInsertGlobal<'a>( + M: &'a Module, + Name: *const c_char, + NameLen: size_t, + T: &'a Type, + ) -> &'a Value; + pub fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value; + pub fn LLVMRustGetNamedValue( + M: &Module, + Name: *const c_char, + NameLen: size_t, + ) -> Option<&Value>; + pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind); + + // Operations on attributes + pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute; + pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute; + pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute; + pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute; + pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; + pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; + pub fn LLVMRustCreateElementTypeAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute; + pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute; + pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute; + pub fn LLVMRustCreateAllocKindAttr(C: &Context, size_arg: u64) -> &Attribute; + pub fn LLVMRustCreateMemoryEffectsAttr(C: &Context, effects: MemoryEffects) -> &Attribute; + + // Operations on functions + pub fn LLVMRustGetOrInsertFunction<'a>( + M: &'a Module, + Name: *const c_char, + NameLen: size_t, + FunctionTy: &'a Type, + ) -> &'a Value; + pub fn LLVMRustAddFunctionAttributes<'a>( + Fn: &'a Value, + index: c_uint, + Attrs: *const &'a Attribute, + AttrsLen: size_t, + ); + + // Operations on call sites + pub fn LLVMRustAddCallSiteAttributes<'a>( + Instr: &'a Value, + index: c_uint, + Attrs: *const &'a Attribute, + AttrsLen: size_t, + ); + + pub fn LLVMRustBuildInvoke<'a>( + B: &Builder<'a>, + Ty: &'a Type, + Fn: &'a Value, + Args: *const &'a Value, + NumArgs: c_uint, + Then: &'a BasicBlock, + Catch: &'a BasicBlock, + OpBundles: *const &OperandBundleDef<'a>, + NumOpBundles: c_uint, + Name: *const c_char, + ) -> &'a Value; + + pub fn LLVMRustSetFastMath(Instr: &Value); + + // Miscellaneous instructions + pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value; + pub fn LLVMRustBuildCall<'a>( + B: &Builder<'a>, + Ty: &'a Type, + Fn: &'a Value, + Args: *const &'a Value, + NumArgs: c_uint, + OpBundles: *const &OperandBundleDef<'a>, + NumOpBundles: c_uint, + ) -> &'a 
Value; + pub fn LLVMRustBuildMemCpy<'a>( + B: &Builder<'a>, + Dst: &'a Value, + DstAlign: c_uint, + Src: &'a Value, + SrcAlign: c_uint, + Size: &'a Value, + IsVolatile: bool, + ) -> &'a Value; + pub fn LLVMRustBuildMemMove<'a>( + B: &Builder<'a>, + Dst: &'a Value, + DstAlign: c_uint, + Src: &'a Value, + SrcAlign: c_uint, + Size: &'a Value, + IsVolatile: bool, + ) -> &'a Value; + pub fn LLVMRustBuildMemSet<'a>( + B: &Builder<'a>, + Dst: &'a Value, + DstAlign: c_uint, + Val: &'a Value, + Size: &'a Value, + IsVolatile: bool, + ) -> &'a Value; + pub fn LLVMRustBuildVectorReduceFAdd<'a>( B: &Builder<'a>, Acc: &'a Value, @@ -1623,53 +1714,11 @@ extern "C" { Order: AtomicOrdering, ) -> &'a Value; - pub fn LLVMBuildAtomicCmpXchg<'a>( - B: &Builder<'a>, - LHS: &'a Value, - CMP: &'a Value, - RHS: &'a Value, - Order: AtomicOrdering, - FailureOrder: AtomicOrdering, - SingleThreaded: Bool, - ) -> &'a Value; + pub fn LLVMRustTimeTraceProfilerInitialize(); - pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool); + pub fn LLVMRustTimeTraceProfilerFinishThread(); - pub fn LLVMBuildAtomicRMW<'a>( - B: &Builder<'a>, - Op: AtomicRmwBinOp, - LHS: &'a Value, - RHS: &'a Value, - Order: AtomicOrdering, - SingleThreaded: Bool, - ) -> &'a Value; - - pub fn LLVMBuildFence<'a>( - B: &Builder<'a>, - Order: AtomicOrdering, - SingleThreaded: Bool, - Name: *const c_char, - ) -> &'a Value; - - /// Writes a module to the specified path. Returns 0 on success. - pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int; - - /// Creates a legacy pass manager -- only used for final codegen. - pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>; - - pub fn LLVMTimeTraceProfilerInitialize(); - - pub fn LLVMTimeTraceProfilerFinishThread(); - - pub fn LLVMTimeTraceProfilerFinish(FileName: *const c_char); - - pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>); - - pub fn LLVMGetHostCPUFeatures() -> *mut c_char; - - pub fn LLVMDisposeMessage(message: *mut c_char); - - pub fn LLVMIsMultithreaded() -> Bool; + pub fn LLVMRustTimeTraceProfilerFinish(FileName: *const c_char); /// Returns a string describing the last error caused by an LLVMRust* call. pub fn LLVMRustGetLastError() -> *const c_char; @@ -1680,15 +1729,6 @@ extern "C" { /// Print the statistics since static dtors aren't picking them up. pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char; - pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type; - - pub fn LLVMStructSetBody<'a>( - StructTy: &'a Type, - ElementTypes: *const &'a Type, - ElementCount: c_uint, - Packed: Bool, - ); - /// Prepares inline assembly. 
pub fn LLVMRustInlineAsm( Ty: &Type, @@ -1761,8 +1801,6 @@ extern "C" { ); pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool; - pub fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; - pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>; pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>); @@ -1783,6 +1821,7 @@ extern "C" { kind: DebugEmissionKind, DWOId: u64, SplitDebugInlining: bool, + DebugNameTableKind: DebugNameTableKind, ) -> &'a DIDescriptor; pub fn LLVMRustDIBuilderCreateFile<'a>( @@ -2052,8 +2091,6 @@ extern "C" { UniqueIdLen: size_t, ) -> &'a DIDerivedType; - pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); - pub fn LLVMRustDIBuilderCreateTemplateTypeParameter<'a>( Builder: &DIBuilder<'a>, Scope: Option<&'a DIScope>, @@ -2092,8 +2129,6 @@ extern "C" { #[allow(improper_ctypes)] pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString); - pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; - pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool; pub fn LLVMRustPrintTargetCPUs( @@ -2134,7 +2169,7 @@ extern "C" { SplitDwarfFile: *const c_char, OutputObjFile: *const c_char, DebugInfoCompression: *const c_char, - ForceEmulatedTls: bool, + UseEmulatedTls: bool, ArgsCstrBuff: *const c_char, ArgsCstrBuffLen: usize, ) -> *mut TargetMachine; @@ -2320,11 +2355,6 @@ extern "C" { len: usize, Identifier: *const c_char, ) -> Option<&Module>; - pub fn LLVMRustGetBitcodeSliceFromObjectData( - Data: *const u8, - len: usize, - out_len: &mut usize, - ) -> *const u8; pub fn LLVMRustGetSliceFromObjectDataByName( data: *const u8, len: usize, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index cc4ccaf19..08519723e 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -5,9 +5,6 @@ use crate::errors::{ }; use crate::llvm; use libc::c_int; -use rustc_codegen_ssa::target_features::{ - supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES, -}; use rustc_codegen_ssa::traits::PrintBackendInfo; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; @@ -17,6 +14,7 @@ use rustc_session::config::{PrintKind, PrintRequest}; use rustc_session::Session; use rustc_span::symbol::Symbol; use rustc_target::spec::{MergeFunctions, PanicStrategy}; +use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES; use std::ffi::{c_char, c_void, CStr, CString}; use std::path::Path; @@ -121,7 +119,7 @@ unsafe fn configure_llvm(sess: &Session) { } if sess.opts.unstable_opts.llvm_time_trace { - llvm::LLVMTimeTraceProfilerInitialize(); + llvm::LLVMRustTimeTraceProfilerInitialize(); } rustc_llvm::initialize_available_targets(); @@ -132,7 +130,7 @@ unsafe fn configure_llvm(sess: &Session) { pub fn time_trace_profiler_finish(file_name: &Path) { unsafe { let file_name = path_to_c_string(file_name); - llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr()); + llvm::LLVMRustTimeTraceProfilerFinish(file_name.as_ptr()); } } @@ -263,6 +261,10 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> { "sve2-bitperm", TargetFeatureFoldStrength::EnableOnly("neon"), ), + // The unaligned-scalar-mem feature was renamed to fast-unaligned-access. 
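
The arm that follows completes the mapping described in this comment: when building against LLVM 17 or older, the Rust-facing `fast-unaligned-access` name has to be translated back to the legacy `unaligned-scalar-mem` feature, and newer LLVMs take the name unchanged. Restated as a tiny standalone function, with `llvm_major` standing in for `get_version().0`:

    fn riscv_unaligned_access_feature(llvm_major: u32) -> &'static str {
        if llvm_major <= 17 { "unaligned-scalar-mem" } else { "fast-unaligned-access" }
    }

    fn main() {
        assert_eq!(riscv_unaligned_access_feature(17), "unaligned-scalar-mem");
        assert_eq!(riscv_unaligned_access_feature(18), "fast-unaligned-access");
    }
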
+ ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => { + LLVMFeature::new("unaligned-scalar-mem") + } (_, s) => LLVMFeature::new(s), } } @@ -274,7 +276,7 @@ pub fn check_tied_features( features: &FxHashMap<&str, bool>, ) -> Option<&'static [&'static str]> { if !features.is_empty() { - for tied in tied_target_features(sess) { + for tied in sess.target.tied_target_features() { // Tied features must be set to the same value, or not set at all let mut tied_iter = tied.iter(); let enabled = features.get(tied_iter.next().unwrap()); @@ -290,10 +292,11 @@ pub fn check_tied_features( /// Must express features in the way Rust understands them pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec { let target_machine = create_informational_target_machine(sess); - supported_target_features(sess) + sess.target + .supported_target_features() .iter() .filter_map(|&(feature, gate)| { - if sess.is_nightly_build() || allow_unstable || gate.is_none() { + if sess.is_nightly_build() || allow_unstable || gate.is_stable() { Some(feature) } else { None @@ -358,7 +361,9 @@ fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> { fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &llvm::TargetMachine) { let mut llvm_target_features = llvm_target_features(tm); let mut known_llvm_target_features = FxHashSet::<&'static str>::default(); - let mut rustc_target_features = supported_target_features(sess) + let mut rustc_target_features = sess + .target + .supported_target_features() .iter() .map(|(feature, _gate)| { // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings. @@ -511,7 +516,7 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec Vec Vec BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } impl Type { - pub fn i8_llcx(llcx: &llvm::Context) -> &Type { - unsafe { llvm::LLVMInt8TypeInContext(llcx) } - } - /// Creates an integer type with the given number of bits, e.g., i24 pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 712b6ed53..624ce6d88 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -9,7 +9,7 @@ use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use rustc_target::abi::HasDataLayout; use rustc_target::abi::{Abi, Align, FieldsShape}; use rustc_target::abi::{Int, Pointer, F32, F64}; -use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants}; +use rustc_target::abi::{Scalar, Size, Variants}; use smallvec::{smallvec, SmallVec}; use std::fmt::Write; @@ -184,7 +184,6 @@ pub trait LayoutLlvmExt<'tcx> { immediate: bool, ) -> &'a Type; fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64; - fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option; fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>; } @@ -356,20 +355,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } } - // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at` - // (the inherent method, which is lacking this caching logic) can result in - // the uncached version being called - not wrong, but potentially inefficient. 
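
The `pointee_info_at` override deleted in the hunk below existed only to put a per-`(type, offset)` cache in front of the underlying layout query. For reference, the interior-mutability memoisation shape it used looks like this when extracted into a self-contained sketch (placeholder key and value types, not rustc's):

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct Cache {
        memo: RefCell<HashMap<(u32, u64), Option<u64>>>,
    }

    impl Cache {
        fn get_or_compute(&self, key: (u32, u64), compute: impl FnOnce() -> Option<u64>) -> Option<u64> {
            // Fast path: the shared borrow ends before the mutable borrow below.
            if let Some(&hit) = self.memo.borrow().get(&key) {
                return hit;
            }
            let result = compute();
            self.memo.borrow_mut().insert(key, result);
            result
        }
    }

    fn main() {
        let cache = Cache { memo: RefCell::new(HashMap::new()) };
        assert_eq!(cache.get_or_compute((1, 8), || Some(42)), Some(42));
        // Second lookup hits the cache and must not recompute.
        assert_eq!(cache.get_or_compute((1, 8), || unreachable!()), Some(42));
    }
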
- fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option { - if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { - return pointee; - } - - let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); - - cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); - result - } - fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> { debug_assert!(self.is_sized()); -- cgit v1.2.3