author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 18:31:44 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 18:31:44 +0000
commit | c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree | 2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler/rustc_codegen_ssa/src
parent | Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
download | rustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz, rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
21 files changed, 373 insertions, 421 deletions
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index a7ac728c5..c4a0f6291 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -365,15 +365,9 @@ fn link_rlib<'a>( // loaded from the libraries found here and then encode that into the // metadata of the rlib we're generating somehow. for lib in codegen_results.crate_info.used_libraries.iter() { - let NativeLibKind::Static { bundle: None | Some(true), whole_archive } = lib.kind else { + let NativeLibKind::Static { bundle: None | Some(true), .. } = lib.kind else { continue; }; - if whole_archive == Some(true) - && flavor == RlibFlavor::Normal - && !codegen_results.crate_info.feature_packed_bundled_libs - { - sess.emit_err(errors::IncompatibleLinkingModifiers); - } if flavor == RlibFlavor::Normal && let Some(filename) = lib.filename { let path = find_native_static_library(filename.as_str(), true, &lib_search_paths, sess); let src = read(path).map_err(|e| sess.emit_fatal(errors::ReadFileError {message: e }))?; diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs index 4c8547407..c6f4bd35e 100644 --- a/compiler/rustc_codegen_ssa/src/back/metadata.rs +++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs @@ -226,9 +226,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static let mut file = write::Object::new(binary_format, architecture, endianness); if sess.target.is_like_osx { - if let Some(build_version) = macho_object_build_version_for_target(&sess.target) { - file.set_macho_build_version(build_version) - } + file.set_macho_build_version(macho_object_build_version_for_target(&sess.target)) } let e_flags = match architecture { Architecture::Mips => { @@ -334,31 +332,28 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static Some(file) } -/// Apple's LD, when linking for Mac Catalyst, requires object files to -/// contain information about what they were built for (LC_BUILD_VERSION): -/// the platform (macOS/watchOS etc), minimum OS version, and SDK version. -/// This returns a `MachOBuildVersion` if necessary for the target. -fn macho_object_build_version_for_target( - target: &Target, -) -> Option<object::write::MachOBuildVersion> { - if !target.llvm_target.ends_with("-macabi") { - return None; - } +/// Since Xcode 15 Apple's LD requires object files to contain information about what they were +/// built for (LC_BUILD_VERSION): the platform (macOS/watchOS etc), minimum OS version, and SDK +/// version. This returns a `MachOBuildVersion` for the target. +fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion { /// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz" /// e.g. 
minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200 fn pack_version((major, minor): (u32, u32)) -> u32 { (major << 16) | (minor << 8) } - let platform = object::macho::PLATFORM_MACCATALYST; - let min_os = (14, 0); - let sdk = (16, 2); + let platform = + rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS"); + let min_os = rustc_target::spec::current_apple_deployment_target(target) + .expect("unknown Apple target OS"); + let sdk = + rustc_target::spec::current_apple_sdk_version(platform).expect("unknown Apple target OS"); let mut build_version = object::write::MachOBuildVersion::default(); build_version.platform = platform; build_version.minos = pack_version(min_os); build_version.sdk = pack_version(sdk); - Some(build_version) + build_version } pub enum MetadataPosition { diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs index 8fb2ccb7e..9cd439410 100644 --- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs +++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs @@ -11,10 +11,10 @@ use rustc_middle::middle::exported_symbols::{ metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel, }; use rustc_middle::query::LocalCrate; -use rustc_middle::query::{ExternProviders, Providers}; use rustc_middle::ty::Instance; use rustc_middle::ty::{self, SymbolName, TyCtxt}; use rustc_middle::ty::{GenericArgKind, GenericArgsRef}; +use rustc_middle::util::Providers; use rustc_session::config::{CrateType, OomStrategy}; use rustc_target::spec::SanitizerSet; @@ -334,7 +334,7 @@ fn exported_symbols_provider_local( match *mono_item { MonoItem::Fn(Instance { def: InstanceDef::Item(def), args }) => { - if args.non_erasable_generics().next().is_some() { + if args.non_erasable_generics(tcx, def).next().is_some() { let symbol = ExportedSymbol::Generic(def, args); symbols.push(( symbol, @@ -346,10 +346,10 @@ fn exported_symbols_provider_local( )); } } - MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), args }) => { + MonoItem::Fn(Instance { def: InstanceDef::DropGlue(def_id, Some(ty)), args }) => { // A little sanity-check debug_assert_eq!( - args.non_erasable_generics().next(), + args.non_erasable_generics(tcx, def_id).next(), Some(GenericArgKind::Type(ty)) ); symbols.push(( @@ -457,11 +457,9 @@ pub fn provide(providers: &mut Providers) { providers.is_unreachable_local_definition = is_unreachable_local_definition_provider; providers.upstream_drop_glue_for = upstream_drop_glue_for_provider; providers.wasm_import_module_map = wasm_import_module_map; -} - -pub fn provide_extern(providers: &mut ExternProviders) { - providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern; - providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider; + providers.extern_queries.is_reachable_non_generic = is_reachable_non_generic_provider_extern; + providers.extern_queries.upstream_monomorphizations_for = + upstream_monomorphizations_for_provider; } fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel { diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index f485af00b..f192747c8 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -286,6 +286,10 @@ pub struct TargetMachineFactoryConfig { /// so the path to the dwarf object has to be provided when we create the target machine. 
/// This can be ignored by backends which do not need it for their Split DWARF support. pub split_dwarf_file: Option<PathBuf>, + + /// The name of the output object file. Used for setting OutputFilenames in target options + /// so that LLVM can emit the CodeView S_OBJNAME record in pdb files + pub output_obj_file: Option<PathBuf>, } impl TargetMachineFactoryConfig { @@ -302,7 +306,10 @@ impl TargetMachineFactoryConfig { } else { None }; - TargetMachineFactoryConfig { split_dwarf_file } + + let output_obj_file = + Some(cgcx.output_filenames.temp_path(OutputType::Object, Some(module_name))); + TargetMachineFactoryConfig { split_dwarf_file, output_obj_file } } } @@ -343,6 +350,12 @@ pub struct CodegenContext<B: WriteBackendMethods> { pub split_debuginfo: rustc_target::spec::SplitDebuginfo, pub split_dwarf_kind: rustc_session::config::SplitDwarfKind, + /// All commandline args used to invoke the compiler, with @file args fully expanded. + /// This will only be used within debug info, e.g. in the pdb file on windows + /// This is mainly useful for other tools that reads that debuginfo to figure out + /// how to call the compiler with the same arguments. + pub expanded_args: Vec<String>, + /// Handler to use for diagnostics produced during codegen. pub diag_emitter: SharedEmitter, /// LLVM optimizations for which we want to print remarks. @@ -1108,6 +1121,7 @@ fn start_executing_work<B: ExtraBackendMethods>( incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), coordinator_send, + expanded_args: tcx.sess.expanded_args.clone(), diag_emitter: shared_emitter.clone(), output_filenames: tcx.output_filenames(()).clone(), regular_module_config: regular_config, diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index aa003e4e8..1e4ea73a1 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -181,7 +181,7 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( old_info } } - (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()), + (_, ty::Dynamic(data, _, _)) => meth::get_vtable(cx, source, data.principal()), _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target), } } @@ -202,7 +202,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( (src, unsized_info(bx, a, b, old_info)) } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); + assert_eq!(def_a, def_b); // implies same number of fields let src_layout = bx.cx().layout_of(src_ty); let dst_layout = bx.cx().layout_of(dst_ty); if src_ty == dst_ty { @@ -211,7 +211,8 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let mut result = None; for i in 0..src_layout.fields.count() { let src_f = src_layout.field(bx.cx(), i); - if src_f.is_zst() { + if src_f.is_1zst() { + // We are looking for the one non-1-ZST field; this is not it. continue; } @@ -272,13 +273,14 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); + assert_eq!(def_a, def_b); // implies same number of fields for i in def_a.variant(FIRST_VARIANT).fields.indices() { let src_f = src.project_field(bx, i.as_usize()); let dst_f = dst.project_field(bx, i.as_usize()); if dst_f.layout.is_zst() { + // No data here, nothing to copy/coerce. 
continue; } @@ -418,9 +420,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( rust_main_def_id: DefId, entry_type: EntryFnType, ) -> Bx::Function { - // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, - // depending on whether the target needs `argc` and `argv` to be passed in. - let llfty = if cx.sess().target.main_needs_argc_argv { + // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, or + // `usize efi_main(void *handle, void *system_table)` depending on the target. + let llfty = if cx.sess().target.os.contains("uefi") { + cx.type_func(&[cx.type_ptr(), cx.type_ptr()], cx.type_isize()) + } else if cx.sess().target.main_needs_argc_argv { cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int()) } else { cx.type_func(&[], cx.type_int()) @@ -483,8 +487,12 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( }; let result = bx.call(start_ty, None, None, start_fn, &args, None); - let cast = bx.intcast(result, cx.type_int(), true); - bx.ret(cast); + if cx.sess().target.os.contains("uefi") { + bx.ret(result); + } else { + let cast = bx.intcast(result, cx.type_int(), true); + bx.ret(cast); + } llfn } @@ -495,7 +503,17 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx, bx: &mut Bx, ) -> (Bx::Value, Bx::Value) { - if cx.sess().target.main_needs_argc_argv { + if cx.sess().target.os.contains("uefi") { + // Params for UEFI + let param_handle = bx.get_param(0); + let param_system_table = bx.get_param(1); + let arg_argc = bx.const_int(cx.type_isize(), 2); + let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), Align::ONE); + bx.store(param_handle, arg_argv, Align::ONE); + let arg_argv_el1 = bx.gep(cx.type_ptr(), arg_argv, &[bx.const_int(cx.type_int(), 1)]); + bx.store(param_system_table, arg_argv_el1, Align::ONE); + (arg_argc, arg_argv) + } else if cx.sess().target.main_needs_argc_argv { // Params from native `main()` used as args for rust start function let param_argc = bx.get_param(0); let param_argv = bx.get_param(1); @@ -839,7 +857,6 @@ impl CrateInfo { dependency_formats: tcx.dependency_formats(()).clone(), windows_subsystem, natvis_debugger_visualizers: Default::default(), - feature_packed_bundled_libs: tcx.features().packed_bundled_libs, }; let crates = tcx.crates(()); diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs index f6936c80b..59efe4cd3 100644 --- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs +++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs @@ -16,7 +16,10 @@ use rustc_target::spec::{abi, SanitizerSet}; use crate::errors; use crate::target_features::from_target_feature; -use crate::{errors::ExpectedUsedSymbol, target_features::check_target_feature_trait_unsafe}; +use crate::{ + errors::{ExpectedCoverageSymbol, ExpectedUsedSymbol}, + target_features::check_target_feature_trait_unsafe, +}; fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage { use rustc_middle::mir::mono::Linkage::*; @@ -128,7 +131,21 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { .emit(); } } - sym::no_coverage => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE, + sym::coverage => { + let inner = attr.meta_item_list(); + match inner.as_deref() { + Some([item]) if item.has_name(sym::off) => { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE; + } + Some([item]) if item.has_name(sym::on) => { + // Allow 
#[coverage(on)] for being explicit, maybe also in future to enable + // coverage on a smaller scope within an excluded larger scope. + } + Some(_) | None => { + tcx.sess.emit_err(ExpectedCoverageSymbol { span: attr.span }); + } + } + } sym::rustc_std_internal_symbol => { codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL } diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs index 5a6807599..641ac3eb8 100644 --- a/compiler/rustc_codegen_ssa/src/common.rs +++ b/compiler/rustc_codegen_ssa/src/common.rs @@ -1,7 +1,7 @@ #![allow(non_camel_case_types)] use rustc_hir::LangItem; -use rustc_middle::mir::interpret::ConstValue; +use rustc_middle::mir; use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt}; use rustc_span::Span; @@ -194,10 +194,10 @@ pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( pub fn asm_const_to_str<'tcx>( tcx: TyCtxt<'tcx>, sp: Span, - const_value: ConstValue<'tcx>, + const_value: mir::ConstValue<'tcx>, ty_and_layout: TyAndLayout<'tcx>, ) -> String { - let ConstValue::Scalar(scalar) = const_value else { + let mir::ConstValue::Scalar(scalar) = const_value else { span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value) }; let value = scalar.assert_bits(ty_and_layout.size); diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs index 067c824ab..989df448a 100644 --- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs +++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs @@ -106,14 +106,14 @@ fn push_debuginfo_type_name<'tcx>( ty_and_layout, &|output, visited| { push_item_name(tcx, def.did(), true, output); - push_generic_params_internal(tcx, args, output, visited); + push_generic_params_internal(tcx, args, def.did(), output, visited); }, output, visited, ); } else { push_item_name(tcx, def.did(), qualified, output); - push_generic_params_internal(tcx, args, output, visited); + push_generic_params_internal(tcx, args, def.did(), output, visited); } } ty::Tuple(component_types) => { @@ -237,8 +237,13 @@ fn push_debuginfo_type_name<'tcx>( let principal = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal); push_item_name(tcx, principal.def_id, qualified, output); - let principal_has_generic_params = - push_generic_params_internal(tcx, principal.args, output, visited); + let principal_has_generic_params = push_generic_params_internal( + tcx, + principal.args, + principal.def_id, + output, + visited, + ); let projection_bounds: SmallVec<[_; 4]> = trait_data .projection_bounds() @@ -421,7 +426,6 @@ fn push_debuginfo_type_name<'tcx>( | ty::Placeholder(..) | ty::Alias(..) | ty::Bound(..) - | ty::GeneratorWitnessMIR(..) | ty::GeneratorWitness(..) 
=> { bug!( "debuginfo: Trying to create type name for \ @@ -516,7 +520,13 @@ pub fn compute_debuginfo_vtable_name<'tcx>( tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref); push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name); visited.clear(); - push_generic_params_internal(tcx, trait_ref.args, &mut vtable_name, &mut visited); + push_generic_params_internal( + tcx, + trait_ref.args, + trait_ref.def_id, + &mut vtable_name, + &mut visited, + ); } else { vtable_name.push('_'); } @@ -610,20 +620,20 @@ fn push_unqualified_item_name( fn push_generic_params_internal<'tcx>( tcx: TyCtxt<'tcx>, args: GenericArgsRef<'tcx>, + def_id: DefId, output: &mut String, visited: &mut FxHashSet<Ty<'tcx>>, ) -> bool { - if args.non_erasable_generics().next().is_none() { + debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args)); + let mut args = args.non_erasable_generics(tcx, def_id).peekable(); + if args.peek().is_none() { return false; } - - debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args)); - let cpp_like_debuginfo = cpp_like_debuginfo(tcx); output.push('<'); - for type_parameter in args.non_erasable_generics() { + for type_parameter in args { match type_parameter { GenericArgKind::Type(type_parameter) => { push_debuginfo_type_name(tcx, type_parameter, true, output, visited); @@ -649,12 +659,12 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S } _ => match ct.ty().kind() { ty::Int(ity) => { - let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty()); + let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all()); let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128; write!(output, "{val}") } ty::Uint(_) => { - let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty()); + let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all()); write!(output, "{val}") } ty::Bool => { @@ -670,10 +680,8 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S // avoiding collisions and will make the emitted type names shorter. let hash_short = tcx.with_stable_hashing_context(|mut hcx| { let mut hasher = StableHasher::new(); - let ct = ct.eval(tcx, ty::ParamEnv::reveal_all()); - hcx.while_hashing_spans(false, |hcx| { - ct.to_valtree().hash_stable(hcx, &mut hasher) - }); + let ct = ct.eval(tcx, ty::ParamEnv::reveal_all(), None).unwrap(); + hcx.while_hashing_spans(false, |hcx| ct.hash_stable(hcx, &mut hasher)); hasher.finish::<Hash64>() }); @@ -691,11 +699,12 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S pub fn push_generic_params<'tcx>( tcx: TyCtxt<'tcx>, args: GenericArgsRef<'tcx>, + def_id: DefId, output: &mut String, ) { let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name"); let mut visited = FxHashSet::default(); - push_generic_params_internal(tcx, args, output, &mut visited); + push_generic_params_internal(tcx, args, def_id, output, &mut visited); } fn push_closure_or_generator_name<'tcx>( @@ -738,7 +747,7 @@ fn push_closure_or_generator_name<'tcx>( // Truncate the args to the length of the above generics. This will cut off // anything closure- or generator-specific. 
let args = args.truncate_to(tcx, generics); - push_generic_params_internal(tcx, args, output, visited); + push_generic_params_internal(tcx, args, enclosing_fn_def_id, output, visited); } fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) { diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index b7d8b9b45..14311ec08 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -7,6 +7,7 @@ use rustc_errors::{ IntoDiagnosticArg, }; use rustc_macros::Diagnostic; +use rustc_middle::ty::layout::LayoutError; use rustc_middle::ty::Ty; use rustc_span::{Span, Symbol}; use rustc_type_ir::FloatTy; @@ -107,10 +108,6 @@ pub struct CreateTempDir { } #[derive(Diagnostic)] -#[diag(codegen_ssa_incompatible_linking_modifiers)] -pub struct IncompatibleLinkingModifiers; - -#[derive(Diagnostic)] #[diag(codegen_ssa_add_native_library)] pub struct AddNativeLibrary { pub library_path: PathBuf, @@ -561,6 +558,13 @@ pub struct UnknownArchiveKind<'a> { } #[derive(Diagnostic)] +#[diag(codegen_ssa_expected_coverage_symbol)] +pub struct ExpectedCoverageSymbol { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(codegen_ssa_expected_used_symbol)] pub struct ExpectedUsedSymbol { #[primary_span] @@ -588,20 +592,6 @@ pub struct InvalidWindowsSubsystem { } #[derive(Diagnostic)] -#[diag(codegen_ssa_erroneous_constant)] -pub struct ErroneousConstant { - #[primary_span] - pub span: Span, -} - -#[derive(Diagnostic)] -#[diag(codegen_ssa_polymorphic_constant_too_generic)] -pub struct PolymorphicConstantTooGeneric { - #[primary_span] - pub span: Span, -} - -#[derive(Diagnostic)] #[diag(codegen_ssa_shuffle_indices_evaluation)] pub struct ShuffleIndicesEvaluation { #[primary_span] @@ -1031,6 +1021,15 @@ pub struct TargetFeatureSafeTrait { } #[derive(Diagnostic)] +#[diag(codegen_ssa_failed_to_get_layout)] +pub struct FailedToGetLayout<'tcx> { + #[primary_span] + pub span: Span, + pub ty: Ty<'tcx>, + pub err: LayoutError<'tcx>, +} + +#[derive(Diagnostic)] #[diag(codegen_ssa_error_creating_remark_dir)] pub struct ErrorCreatingRemarkDir { pub error: std::io::Error, diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index 7bed3fa61..f6186a290 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -31,7 +31,7 @@ use rustc_middle::dep_graph::WorkProduct; use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile; use rustc_middle::middle::dependency_format::Dependencies; use rustc_middle::middle::exported_symbols::SymbolExportKind; -use rustc_middle::query::{ExternProviders, Providers}; +use rustc_middle::util::Providers; use rustc_serialize::opaque::{FileEncoder, MemDecoder}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use rustc_session::config::{CrateType, OutputFilenames, OutputType, RUST_CGU_EXT}; @@ -164,7 +164,6 @@ pub struct CrateInfo { pub dependency_formats: Lrc<Dependencies>, pub windows_subsystem: Option<String>, pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>, - pub feature_packed_bundled_libs: bool, // unstable feature flag. 
} #[derive(Encodable, Decodable)] @@ -190,10 +189,6 @@ pub fn provide(providers: &mut Providers) { crate::codegen_attrs::provide(providers); } -pub fn provide_extern(providers: &mut ExternProviders) { - crate::back::symbol_export::provide_extern(providers); -} - /// Checks if the given filename ends with the `.rcgu.o` extension that `rustc` /// uses for the object files it generates. pub fn looks_like_rust_object_file(filename: &str) -> bool { diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs index 22c1f0597..d9419dbc9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs +++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs @@ -234,7 +234,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> | PlaceContext::NonMutatingUse( NonMutatingUseContext::Inspect | NonMutatingUseContext::SharedBorrow - | NonMutatingUseContext::ShallowBorrow + | NonMutatingUseContext::FakeBorrow | NonMutatingUseContext::AddressOf | NonMutatingUseContext::Projection, ) => { @@ -284,8 +284,8 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi for (bb, data) in mir.basic_blocks.iter_enumerated() { match data.terminator().kind { TerminatorKind::Goto { .. } - | TerminatorKind::Resume - | TerminatorKind::Terminate + | TerminatorKind::UnwindResume + | TerminatorKind::UnwindTerminate(_) | TerminatorKind::Return | TerminatorKind::GeneratorDrop | TerminatorKind::Unreachable diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 4f26383ed..bd0707edf 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -12,7 +12,7 @@ use crate::MemFlags; use rustc_ast as ast; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_hir::lang_items::LangItem; -use rustc_middle::mir::{self, AssertKind, SwitchTargets}; +use rustc_middle::mir::{self, AssertKind, SwitchTargets, UnwindTerminateReason}; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, Instance, Ty}; @@ -178,7 +178,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)), mir::UnwindAction::Continue => None, mir::UnwindAction::Unreachable => None, - mir::UnwindAction::Terminate => { + mir::UnwindAction::Terminate(reason) => { if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) { // MSVC SEH will abort automatically if an exception tries to // propagate out from cleanup. 
@@ -191,7 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { None } else { - Some(fx.terminate_block()) + Some(fx.terminate_block(reason)) } } }; @@ -264,7 +264,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { ) -> MergingSucc { let unwind_target = match unwind { mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)), - mir::UnwindAction::Terminate => Some(fx.terminate_block()), + mir::UnwindAction::Terminate(reason) => Some(fx.terminate_block(reason)), mir::UnwindAction::Continue => None, mir::UnwindAction::Unreachable => None, }; @@ -416,7 +416,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - PassMode::Cast(cast_ty, _) => { + PassMode::Cast { cast: cast_ty, pad_i32: _ } => { let op = match self.locals[mir::RETURN_PLACE] { LocalRef::Operand(op) => op, LocalRef::PendingOperand => bug!("use of return before def"), @@ -649,12 +649,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx, terminator: &mir::Terminator<'tcx>, + reason: UnwindTerminateReason, ) { let span = terminator.source_info.span; self.set_debug_loc(bx, terminator.source_info); // Obtain the panic entry point. - let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind); + let (fn_abi, llfn) = common::build_langcall(bx, Some(span), reason.lang_item()); // Codegen the actual panic invoke/call. let merging_succ = helper.do_call( @@ -927,21 +928,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // we get a value of a built-in pointer type. // // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`. - 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() - && !op.layout.ty.is_ref() - { - for i in 0..op.layout.fields.count() { - let field = op.extract_field(bx, i); - if !field.layout.is_zst() { - // we found the one non-zero-sized field that is allowed - // now find *its* non-zero-sized field, or stop if it's a - // pointer - op = field; - continue 'descend_newtypes; - } - } - - span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); + while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() { + let (idx, _) = op.layout.non_1zst_field(bx).expect( + "not exactly one non-1-ZST field in a `DispatchFromDyn` type", + ); + op = op.extract_field(bx, idx); } // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its @@ -969,21 +960,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } Immediate(_) => { // See comment above explaining why we peel these newtypes - 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() - && !op.layout.ty.is_ref() - { - for i in 0..op.layout.fields.count() { - let field = op.extract_field(bx, i); - if !field.layout.is_zst() { - // we found the one non-zero-sized field that is allowed - // now find *its* non-zero-sized field, or stop if it's a - // pointer - op = field; - continue 'descend_newtypes; - } - } - - span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); + while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() { + let (idx, _) = op.layout.non_1zst_field(bx).expect( + "not exactly one non-1-ZST field in a `DispatchFromDyn` type", + ); + op = op.extract_field(bx, idx); } // Make sure that we've actually unwrapped the rcvr down @@ -1107,9 +1088,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { InlineAsmOperandRef::InOut { reg, late, in_value, out_place } } 
mir::InlineAsmOperand::Const { ref value } => { - let const_value = self - .eval_mir_constant(value) - .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved")); + let const_value = self.eval_mir_constant(value); let string = common::asm_const_to_str( bx.tcx(), span, @@ -1119,8 +1098,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { InlineAsmOperandRef::Const { string } } mir::InlineAsmOperand::SymFn { ref value } => { - let literal = self.monomorphize(value.literal); - if let ty::FnDef(def_id, args) = *literal.ty().kind() { + let const_ = self.monomorphize(value.const_); + if let ty::FnDef(def_id, args) = *const_.ty().kind() { let instance = ty::Instance::resolve_for_fn_ptr( bx.tcx(), ty::ParamEnv::reveal_all(), @@ -1224,13 +1203,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.set_debug_loc(bx, terminator.source_info); match terminator.kind { - mir::TerminatorKind::Resume => { + mir::TerminatorKind::UnwindResume => { self.codegen_resume_terminator(helper, bx); MergingSucc::False } - mir::TerminatorKind::Terminate => { - self.codegen_terminate_terminator(helper, bx, terminator); + mir::TerminatorKind::UnwindTerminate(reason) => { + self.codegen_terminate_terminator(helper, bx, terminator, reason); MergingSucc::False } @@ -1329,7 +1308,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ) { match arg.mode { PassMode::Ignore => return, - PassMode::Cast(_, true) => { + PassMode::Cast { pad_i32: true, .. } => { // Fill padding with undef value, where applicable. llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32()))); } @@ -1341,7 +1320,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } _ => bug!("codegen_argument: {:?} invalid for pair argument", op), }, - PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val { + PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => match op.val { Ref(a, Some(b), _) => { llargs.push(a); llargs.push(b); @@ -1366,7 +1345,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { op.val.store(bx, scratch); (scratch.llval, scratch.align, true) } - PassMode::Cast(..) => { + PassMode::Cast { .. } => { let scratch = PlaceRef::alloca(bx, arg.layout); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) @@ -1419,7 +1398,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if let PassMode::Cast(ty, _) = &arg.mode { + if let PassMode::Cast { cast: ty, .. } = &arg.mode { let llty = bx.cast_backend_type(ty); llval = bx.load(llty, llval, align.min(arg.layout.align.abi)); } else { @@ -1579,79 +1558,81 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }) } - fn terminate_block(&mut self) -> Bx::BasicBlock { - self.terminate_block.unwrap_or_else(|| { - let funclet; - let llbb; - let mut bx; - if base::wants_msvc_seh(self.cx.sess()) { - // This is a basic block that we're aborting the program for, - // notably in an `extern` function. These basic blocks are inserted - // so that we assert that `extern` functions do indeed not panic, - // and if they do we abort the process. - // - // On MSVC these are tricky though (where we're doing funclets). If - // we were to do a cleanuppad (like below) the normal functions like - // `longjmp` would trigger the abort logic, terminating the - // program. 
Instead we insert the equivalent of `catch(...)` for C++ - // which magically doesn't trigger when `longjmp` files over this - // frame. - // - // Lots more discussion can be found on #48251 but this codegen is - // modeled after clang's for: - // - // try { - // foo(); - // } catch (...) { - // bar(); - // } - // - // which creates an IR snippet like - // - // cs_terminate: - // %cs = catchswitch within none [%cp_terminate] unwind to caller - // cp_terminate: - // %cp = catchpad within %cs [null, i32 64, null] - // ... - - llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate"); - let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate"); - - let mut cs_bx = Bx::build(self.cx, llbb); - let cs = cs_bx.catch_switch(None, None, &[cp_llbb]); - - // The "null" here is actually a RTTI type descriptor for the - // C++ personality function, but `catch (...)` has no type so - // it's null. The 64 here is actually a bitfield which - // represents that this is a catch-all block. - bx = Bx::build(self.cx, cp_llbb); - let null = - bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space)); - let sixty_four = bx.const_i32(64); - funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null])); - } else { - llbb = Bx::append_block(self.cx, self.llfn, "terminate"); - bx = Bx::build(self.cx, llbb); + fn terminate_block(&mut self, reason: UnwindTerminateReason) -> Bx::BasicBlock { + if let Some((cached_bb, cached_reason)) = self.terminate_block && reason == cached_reason { + return cached_bb; + } - let llpersonality = self.cx.eh_personality(); - bx.filter_landing_pad(llpersonality); + let funclet; + let llbb; + let mut bx; + if base::wants_msvc_seh(self.cx.sess()) { + // This is a basic block that we're aborting the program for, + // notably in an `extern` function. These basic blocks are inserted + // so that we assert that `extern` functions do indeed not panic, + // and if they do we abort the process. + // + // On MSVC these are tricky though (where we're doing funclets). If + // we were to do a cleanuppad (like below) the normal functions like + // `longjmp` would trigger the abort logic, terminating the + // program. Instead we insert the equivalent of `catch(...)` for C++ + // which magically doesn't trigger when `longjmp` files over this + // frame. + // + // Lots more discussion can be found on #48251 but this codegen is + // modeled after clang's for: + // + // try { + // foo(); + // } catch (...) { + // bar(); + // } + // + // which creates an IR snippet like + // + // cs_terminate: + // %cs = catchswitch within none [%cp_terminate] unwind to caller + // cp_terminate: + // %cp = catchpad within %cs [null, i32 64, null] + // ... + + llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate"); + let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate"); + + let mut cs_bx = Bx::build(self.cx, llbb); + let cs = cs_bx.catch_switch(None, None, &[cp_llbb]); + + // The "null" here is actually a RTTI type descriptor for the + // C++ personality function, but `catch (...)` has no type so + // it's null. The 64 here is actually a bitfield which + // represents that this is a catch-all block. 
+ bx = Bx::build(self.cx, cp_llbb); + let null = + bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space)); + let sixty_four = bx.const_i32(64); + funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null])); + } else { + llbb = Bx::append_block(self.cx, self.llfn, "terminate"); + bx = Bx::build(self.cx, llbb); - funclet = None; - } + let llpersonality = self.cx.eh_personality(); + bx.filter_landing_pad(llpersonality); - self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span)); + funclet = None; + } - let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind); - let fn_ty = bx.fn_decl_backend_type(&fn_abi); + self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span)); - let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref()); - bx.do_not_inline(llret); + let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, reason.lang_item()); + let fn_ty = bx.fn_decl_backend_type(&fn_abi); - bx.unreachable(); + let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref()); + bx.do_not_inline(llret); - self.terminate_block = Some(llbb); - llbb - }) + bx.unreachable(); + + self.terminate_block = Some((llbb, reason)); + llbb } /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already @@ -1761,7 +1742,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } DirectOperand(index) => { // If there is a cast, we have to store and reload. - let op = if let PassMode::Cast(..) = ret_abi.mode { + let op = if let PassMode::Cast { .. } = ret_abi.mode { let tmp = PlaceRef::alloca(bx, ret_abi.layout); tmp.storage_live(bx); bx.store_arg(&ret_abi, llval, tmp); diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs index babcf9bee..fde4e85f9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/constant.rs +++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs @@ -2,7 +2,7 @@ use crate::errors; use crate::mir::operand::OperandRef; use crate::traits::*; use rustc_middle::mir; -use rustc_middle::mir::interpret::{ConstValue, ErrorHandled}; +use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; use rustc_target::abi::Abi; @@ -13,74 +13,44 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn eval_mir_constant_to_operand( &self, bx: &mut Bx, - constant: &mir::Constant<'tcx>, - ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> { - let val = self.eval_mir_constant(constant)?; + constant: &mir::ConstOperand<'tcx>, + ) -> OperandRef<'tcx, Bx::Value> { + let val = self.eval_mir_constant(constant); let ty = self.monomorphize(constant.ty()); - Ok(OperandRef::from_const(bx, val, ty)) + OperandRef::from_const(bx, val, ty) } - pub fn eval_mir_constant( - &self, - constant: &mir::Constant<'tcx>, - ) -> Result<ConstValue<'tcx>, ErrorHandled> { - let ct = self.monomorphize(constant.literal); - let uv = match ct { - mir::ConstantKind::Ty(ct) => match ct.kind() { - ty::ConstKind::Unevaluated(uv) => uv.expand(), - ty::ConstKind::Value(val) => { - return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))); - } - err => span_bug!( - constant.span, - "encountered bad ConstKind after monomorphizing: {:?}", - err - ), - }, - mir::ConstantKind::Unevaluated(uv, _) => uv, - mir::ConstantKind::Val(val, _) => return Ok(val), - }; - - self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| { - match err { - 
ErrorHandled::Reported(_) => { - self.cx.tcx().sess.emit_err(errors::ErroneousConstant { span: constant.span }); - } - ErrorHandled::TooGeneric => { - self.cx - .tcx() - .sess - .diagnostic() - .emit_bug(errors::PolymorphicConstantTooGeneric { span: constant.span }); - } - } - err - }) + pub fn eval_mir_constant(&self, constant: &mir::ConstOperand<'tcx>) -> mir::ConstValue<'tcx> { + self.monomorphize(constant.const_) + .eval(self.cx.tcx(), ty::ParamEnv::reveal_all(), Some(constant.span)) + .expect("erroneous constant not captured by required_consts") } /// This is a convenience helper for `simd_shuffle_indices`. It has the precondition - /// that the given `constant` is an `ConstantKind::Unevaluated` and must be convertible to + /// that the given `constant` is an `Const::Unevaluated` and must be convertible to /// a `ValTree`. If you want a more general version of this, talk to `wg-const-eval` on zulip. + /// + /// Note that this function is cursed, since usually MIR consts should not be evaluated to valtrees! pub fn eval_unevaluated_mir_constant_to_valtree( &self, - constant: &mir::Constant<'tcx>, + constant: &mir::ConstOperand<'tcx>, ) -> Result<Option<ty::ValTree<'tcx>>, ErrorHandled> { - let uv = match self.monomorphize(constant.literal) { - mir::ConstantKind::Unevaluated(uv, _) => uv.shrink(), - mir::ConstantKind::Ty(c) => match c.kind() { + let uv = match self.monomorphize(constant.const_) { + mir::Const::Unevaluated(uv, _) => uv.shrink(), + mir::Const::Ty(c) => match c.kind() { // A constant that came from a const generic but was then used as an argument to old-style // simd_shuffle (passing as argument instead of as a generic param). rustc_type_ir::ConstKind::Value(valtree) => return Ok(Some(valtree)), other => span_bug!(constant.span, "{other:#?}"), }, - // We should never encounter `ConstantKind::Val` unless MIR opts (like const prop) evaluate + // We should never encounter `Const::Val` unless MIR opts (like const prop) evaluate // a constant and write that value back into `Operand`s. This could happen, but is unlikely. // Also: all users of `simd_shuffle` are on unstable and already need to take a lot of care // around intrinsics. For an issue to happen here, it would require a macro expanding to a // `simd_shuffle` call without wrapping the constant argument in a `const {}` block, but // the user pass through arbitrary expressions. // FIXME(oli-obk): replace the magic const generic argument of `simd_shuffle` with a real - // const generic. + // const generic, and get rid of this entire function. 
other => span_bug!(constant.span, "{other:#?}"), }; let uv = self.monomorphize(uv); @@ -95,7 +65,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn simd_shuffle_indices( &mut self, bx: &Bx, - constant: &mir::Constant<'tcx>, + constant: &mir::ConstOperand<'tcx>, ) -> (Bx::Value, Ty<'tcx>) { let ty = self.monomorphize(constant.ty()); let val = self diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 526c16a59..0dc30d21c 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -1,10 +1,12 @@ use crate::traits::*; +use rustc_data_structures::fx::FxHashMap; use rustc_index::IndexVec; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir; use rustc_middle::ty; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; +use rustc_middle::ty::Instance; use rustc_middle::ty::Ty; use rustc_session::config::DebugInfo; use rustc_span::symbol::{kw, Symbol}; @@ -17,10 +19,13 @@ use super::{FunctionCx, LocalRef}; use std::ops::Range; -pub struct FunctionDebugContext<S, L> { +pub struct FunctionDebugContext<'tcx, S, L> { + /// Maps from source code to the corresponding debug info scope. pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>, -} + /// Maps from an inlined function to its debug info declaration. + pub inlined_function_scopes: FxHashMap<Instance<'tcx>, S>, +} #[derive(Copy, Clone)] pub enum VariableKind { ArgumentVariable(usize /*index*/), @@ -153,8 +158,7 @@ fn calculate_debuginfo_offset< L: DebugInfoOffsetLocation<'tcx, Bx>, >( bx: &mut Bx, - local: mir::Local, - var: &PerLocalVarDebugInfo<'tcx, Bx::DIVariable>, + projection: &[mir::PlaceElem<'tcx>], base: L, ) -> DebugInfoOffset<L> { let mut direct_offset = Size::ZERO; @@ -162,7 +166,7 @@ fn calculate_debuginfo_offset< let mut indirect_offsets = vec![]; let mut place = base; - for elem in &var.projection[..] { + for elem in projection { match *elem { mir::ProjectionElem::Deref => { indirect_offsets.push(Size::ZERO); @@ -183,11 +187,7 @@ fn calculate_debuginfo_offset< } => { let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset); let FieldsShape::Array { stride, count: _ } = place.layout().fields else { - span_bug!( - var.source_info.span, - "ConstantIndex on non-array type {:?}", - place.layout() - ) + bug!("ConstantIndex on non-array type {:?}", place.layout()) }; *offset += stride * index; place = place.project_constant_index(bx, index); @@ -195,11 +195,7 @@ fn calculate_debuginfo_offset< _ => { // Sanity check for `can_use_in_debuginfo`. debug_assert!(!elem.can_use_in_debuginfo()); - span_bug!( - var.source_info.span, - "unsupported var debuginfo place `{:?}`", - mir::Place { local, projection: var.projection }, - ) + bug!("unsupported var debuginfo projection `{:?}`", projection) } } } @@ -402,7 +398,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return }; let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } = - calculate_debuginfo_offset(bx, local, &var, base.layout); + calculate_debuginfo_offset(bx, &var.projection, base.layout); // When targeting MSVC, create extra allocas for arguments instead of pointing multiple // dbg_var_addr() calls into the same alloca with offsets. 
MSVC uses CodeView records @@ -420,7 +416,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if should_create_individual_allocas { let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } = - calculate_debuginfo_offset(bx, local, &var, base); + calculate_debuginfo_offset(bx, &var.projection, base); // Create a variable which will be a pointer to the actual value let ptr_ty = Ty::new_ptr( @@ -484,54 +480,75 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { None }; - let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| { - let (var_ty, var_kind) = match var.value { + let var_ty = if let Some(ref fragment) = var.composite { + self.monomorphize(fragment.ty) + } else { + match var.value { mir::VarDebugInfoContents::Place(place) => { - let var_ty = self.monomorphized_place_ty(place.as_ref()); - let var_kind = if let Some(arg_index) = var.argument_index - && place.projection.is_empty() - { - let arg_index = arg_index as usize; - if target_is_msvc { - // ScalarPair parameters are spilled to the stack so they need to - // be marked as a `LocalVariable` for MSVC debuggers to visualize - // their data correctly. (See #81894 & #88625) - let var_ty_layout = self.cx.layout_of(var_ty); - if let Abi::ScalarPair(_, _) = var_ty_layout.abi { - VariableKind::LocalVariable - } else { - VariableKind::ArgumentVariable(arg_index) - } - } else { - // FIXME(eddyb) shouldn't `ArgumentVariable` indices be - // offset in closures to account for the hidden environment? - VariableKind::ArgumentVariable(arg_index) - } - } else { - VariableKind::LocalVariable - }; - (var_ty, var_kind) - } - mir::VarDebugInfoContents::Const(c) => { - let ty = self.monomorphize(c.ty()); - (ty, VariableKind::LocalVariable) + self.monomorphized_place_ty(place.as_ref()) } - mir::VarDebugInfoContents::Composite { ty, fragments: _ } => { - let ty = self.monomorphize(ty); - (ty, VariableKind::LocalVariable) + mir::VarDebugInfoContents::Const(c) => self.monomorphize(c.ty()), + } + }; + + let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| { + let var_kind = if let Some(arg_index) = var.argument_index + && var.composite.is_none() + && let mir::VarDebugInfoContents::Place(place) = var.value + && place.projection.is_empty() + { + let arg_index = arg_index as usize; + if target_is_msvc { + // ScalarPair parameters are spilled to the stack so they need to + // be marked as a `LocalVariable` for MSVC debuggers to visualize + // their data correctly. (See #81894 & #88625) + let var_ty_layout = self.cx.layout_of(var_ty); + if let Abi::ScalarPair(_, _) = var_ty_layout.abi { + VariableKind::LocalVariable + } else { + VariableKind::ArgumentVariable(arg_index) + } + } else { + // FIXME(eddyb) shouldn't `ArgumentVariable` indices be + // offset in closures to account for the hidden environment? + VariableKind::ArgumentVariable(arg_index) } + } else { + VariableKind::LocalVariable }; self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span) }); + let fragment = if let Some(ref fragment) = var.composite { + let var_layout = self.cx.layout_of(var_ty); + + let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } = + calculate_debuginfo_offset(bx, &fragment.projection, var_layout); + debug_assert!(indirect_offsets.is_empty()); + + if fragment_layout.size == Size::ZERO { + // Fragment is a ZST, so does not represent anything. Avoid generating anything + // as this may conflict with a fragment that covers the entire variable. 
+ continue; + } else if fragment_layout.size == var_layout.size { + // Fragment covers entire variable, so as far as + // DWARF is concerned, it's not really a fragment. + None + } else { + Some(direct_offset..direct_offset + fragment_layout.size) + } + } else { + None + }; + match var.value { mir::VarDebugInfoContents::Place(place) => { per_local[place.local].push(PerLocalVarDebugInfo { name: var.name, source_info: var.source_info, dbg_var, - fragment: None, + fragment, projection: place.projection, }); } @@ -539,59 +556,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if let Some(dbg_var) = dbg_var { let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue }; - if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) { - self.set_debug_loc(bx, var.source_info); - let base = Self::spill_operand_to_stack( - operand, - Some(var.name.to_string()), - bx, - ); + let operand = self.eval_mir_constant_to_operand(bx, &c); + self.set_debug_loc(bx, var.source_info); + let base = + Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx); - bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], None); - } - } - } - mir::VarDebugInfoContents::Composite { ty, ref fragments } => { - let var_ty = self.monomorphize(ty); - let var_layout = self.cx.layout_of(var_ty); - for fragment in fragments { - let mut fragment_start = Size::ZERO; - let mut fragment_layout = var_layout; - - for elem in &fragment.projection { - match *elem { - mir::ProjectionElem::Field(field, _) => { - let i = field.index(); - fragment_start += fragment_layout.fields.offset(i); - fragment_layout = fragment_layout.field(self.cx, i); - } - _ => span_bug!( - var.source_info.span, - "unsupported fragment projection `{:?}`", - elem, - ), - } - } - - let place = fragment.contents; - let fragment = if fragment_layout.size == Size::ZERO { - // Fragment is a ZST, so does not represent anything. - continue; - } else if fragment_layout.size == var_layout.size { - // Fragment covers entire variable, so as far as - // DWARF is concerned, it's not really a fragment. - None - } else { - Some(fragment_start..fragment_start + fragment_layout.size) - }; - - per_local[place.local].push(PerLocalVarDebugInfo { - name: var.name, - source_info: var.source_info, - dbg_var, - fragment, - projection: place.projection, - }); + bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment); } } } diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 8821fb21f..8efef4405 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -462,7 +462,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(..) = &fn_abi.ret.mode { + if let PassMode::Cast { .. 
} = &fn_abi.ret.mode { bx.store(llval, result.llval, result.align); } else { OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 3464f9108..a61018f98 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -3,8 +3,8 @@ use crate::traits::*; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::mir; -use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::mir::traversal; +use rustc_middle::mir::UnwindTerminateReason; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout}; use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt}; use rustc_target::abi::call::{FnAbi, PassMode}; @@ -45,7 +45,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { mir: &'tcx mir::Body<'tcx>, - debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>, + debug_context: Option<FunctionDebugContext<'tcx, Bx::DIScope, Bx::DILocation>>, llfn: Bx::Function, @@ -83,8 +83,8 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> { /// Cached unreachable block unreachable_block: Option<Bx::BasicBlock>, - /// Cached terminate upon unwinding block - terminate_block: Option<Bx::BasicBlock>, + /// Cached terminate upon unwinding block and its reason + terminate_block: Option<(Bx::BasicBlock, UnwindTerminateReason)>, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: @@ -118,7 +118,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { T: Copy + TypeFoldable<TyCtxt<'tcx>>, { debug!("monomorphize: self.instance={:?}", self.instance); - self.instance.subst_mir_and_normalize_erasing_regions( + self.instance.instantiate_mir_and_normalize_erasing_regions( self.cx.tcx(), ty::ParamEnv::reveal_all(), ty::EarlyBinder::bind(value), @@ -144,7 +144,7 @@ impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but - // we need something in the operand. + // we need something sufficiently aligned in the operand. LocalRef::Operand(OperandRef::zero_sized(layout)) } else { LocalRef::PendingOperand @@ -174,7 +174,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let mut start_bx = Bx::build(cx, start_llbb); if mir.basic_blocks.iter().any(|bb| { - bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate)) + bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate(_))) }) { start_bx.set_personality_fn(cx.eh_personality()); } @@ -211,23 +211,14 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx); - // Evaluate all required consts; codegen later assumes that CTFE will never fail. - let mut all_consts_ok = true; - for const_ in &mir.required_consts { - if let Err(err) = fx.eval_mir_constant(const_) { - all_consts_ok = false; - match err { - // errored or at least linted - ErrorHandled::Reported(_) => {} - ErrorHandled::TooGeneric => { - span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err) - } - } - } - } - if !all_consts_ok { - // We leave the IR in some half-built state here, and rely on this code not even being - // submitted to LLVM once an error was raised. 
+ // Rust post-monomorphization checks; we later rely on them. + if let Err(err) = + mir.post_mono_checks(cx.tcx(), ty::ParamEnv::reveal_all(), |c| Ok(fx.monomorphize(c))) + { + err.emit_err(cx.tcx()); + // This IR shouldn't ever be emitted, but let's try to guard against any of this code + // ever running. + start_bx.abort(); return; } @@ -326,7 +317,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_abi.args[idx]; idx += 1; - if let PassMode::Cast(_, true) = arg.mode { + if let PassMode::Cast { pad_i32: true, .. } = arg.mode { llarg_idx += 1; } let pr_field = place.project_field(bx, i); @@ -350,7 +341,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( let arg = &fx.fn_abi.args[idx]; idx += 1; - if let PassMode::Cast(_, true) = arg.mode { + if let PassMode::Cast { pad_i32: true, .. } = arg.mode { llarg_idx += 1; } diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index f90d1a0fc..0ab2b7ecd 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -6,8 +6,8 @@ use crate::glue; use crate::traits::*; use crate::MemFlags; -use rustc_middle::mir; -use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar}; +use rustc_middle::mir::interpret::{alloc_range, Pointer, Scalar}; +use rustc_middle::mir::{self, ConstValue}; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::Ty; use rustc_target::abi::{self, Abi, Align, Size}; @@ -50,7 +50,8 @@ pub enum OperandValue<V> { /// from [`ConstMethods::const_poison`]. /// /// An `OperandValue` *must* be this variant for any type for which - /// `is_zst` on its `Layout` returns `true`. + /// `is_zst` on its `Layout` returns `true`. Note however that + /// these values can still require alignment. 
     ZeroSized,
 }
 
@@ -85,7 +86,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
     pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         bx: &mut Bx,
-        val: ConstValue<'tcx>,
+        val: mir::ConstValue<'tcx>,
         ty: Ty<'tcx>,
     ) -> Self {
         let layout = bx.layout_of(ty);
@@ -99,12 +100,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 OperandValue::Immediate(llval)
             }
             ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
-            ConstValue::Slice { data, start, end } => {
+            ConstValue::Slice { data, meta } => {
                 let Abi::ScalarPair(a_scalar, _) = layout.abi else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
-                    Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+                    Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data), Size::ZERO),
                     &bx.tcx(),
                 );
                 let a_llval = bx.scalar_to_backend(
@@ -112,10 +113,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                     a_scalar,
                     bx.scalar_pair_element_backend_type(layout, 0, true),
                 );
-                let b_llval = bx.const_usize((end - start) as u64);
+                let b_llval = bx.const_usize(meta);
                 OperandValue::Pair(a_llval, b_llval)
             }
-            ConstValue::ByRef { alloc, offset } => {
+            ConstValue::Indirect { alloc_id, offset } => {
+                let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                 return Self::from_const_alloc(bx, layout, alloc, offset);
             }
         };
@@ -133,15 +135,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         assert_eq!(alloc_align, layout.align.abi);
 
         let read_scalar = |start, size, s: abi::Scalar, ty| {
-            let val = alloc
-                .0
-                .read_scalar(
-                    bx,
-                    alloc_range(start, size),
-                    /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
-                )
-                .unwrap();
-            bx.scalar_to_backend(val, s, ty)
+            match alloc.0.read_scalar(
+                bx,
+                alloc_range(start, size),
+                /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
+            ) {
+                Ok(val) => bx.scalar_to_backend(val, s, ty),
+                Err(_) => bx.const_poison(ty),
+            }
         };
 
         // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@@ -154,7 +155,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
+                let val = read_scalar(offset, size, s, bx.backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
             Abi::ScalarPair(
@@ -162,10 +163,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 b @ abi::Scalar::Initialized { .. },
             ) => {
                 let (a_size, b_size) = (a.size(bx), b.size(bx));
-                let b_offset = a_size.align_to(b.align(bx).abi);
+                let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                 assert!(b_offset.bytes() > 0);
                 let a_val = read_scalar(
-                    Size::ZERO,
+                    offset,
                     a_size,
                     a,
                     bx.scalar_pair_element_backend_type(layout, 0, true),
@@ -181,6 +182,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             _ if layout.is_zst() => OperandRef::zero_sized(layout),
             _ => {
                 // Neither a scalar nor scalar pair. Load from a place
+                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
+                // same `ConstAllocation`?
                 let init = bx.const_data_from_alloc(alloc);
                 let base_addr = bx.static_addr_of(init, alloc_align, None);
 
@@ -568,12 +571,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.codegen_consume(bx, place.as_ref())
             }
 
-            mir::Operand::Constant(ref constant) => {
-                // This cannot fail because we checked all required_consts in advance.
-                self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
-                    span_bug!(constant.span, "erroneous constant not captured by required_consts")
-                })
-            }
+            mir::Operand::Constant(ref constant) => self.eval_mir_constant_to_operand(bx, constant),
         }
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index e7c3906d9..eb590a45a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -114,7 +114,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 bx.struct_gep(ty, self.llval, 1)
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
-                // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+                // ZST fields (even some that require alignment) are not included in Scalar,
+                // ScalarPair, and Vector layouts, so manually offset the pointer.
                 bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) => {
@@ -462,7 +463,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 mir::ProjectionElem::Field(ref field, _) => {
                     cg_base.project_field(bx, field.index())
                 }
-                mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
+                mir::ProjectionElem::OpaqueCast(ty) => {
+                    bug!("encountered OpaqueCast({ty}) in codegen")
+                }
+                mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
                 mir::ProjectionElem::Index(index) => {
                     let index = &mir::Operand::Copy(mir::Place::from(index));
                     let index = self.codegen_operand(bx, index);
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 07c61df21..fc8d33891 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1004,6 +1004,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::Aggregate(..) => {
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
                 let ty = self.monomorphize(ty);
+                // For ZST this can be `OperandValueKind::ZeroSized`.
                 self.cx.spanned_layout_of(ty, span).is_zst()
             }
         }
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 0a02ca6b3..ac8123bc1 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -11,9 +11,9 @@ use rustc_data_structures::sync::{DynSend, DynSync};
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::{ExternProviders, Providers};
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_middle::util::Providers;
 use rustc_session::{
     config::{self, OutputFilenames, PrintRequest},
     cstore::MetadataLoaderDyn,
@@ -85,7 +85,6 @@ pub trait CodegenBackend {
     }
 
     fn provide(&self, _providers: &mut Providers) {}
-    fn provide_extern(&self, _providers: &mut ExternProviders) {}
     fn codegen_crate<'tcx>(
         &self,
         tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
index 63fecaf34..4acc0ea07 100644
--- a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -26,7 +26,7 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes {
         fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         llfn: Self::Function,
         mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+    ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>>;
 
     // FIXME(eddyb) find a common convention for all of the debuginfo-related
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
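Several of the comments touched above (the `OperandValue::ZeroSized` doc comment in operand.rs, the zero-size temporaries note in mir/mod.rs, and the ZST-field note in place.rs) stress the same point: a value can have zero size and still carry a nontrivial alignment requirement, so even "empty" operands and fields must be kept suitably aligned. The stand-alone sketch below illustrates that property; it is purely illustrative, uses hypothetical type names, and is not part of the compiler sources above.

    // Illustrative only: zero-sized types can still demand alignment.
    #[repr(align(16))]
    struct AlignedZst; // hypothetical example type: size 0, alignment 16

    fn main() {
        assert_eq!(std::mem::size_of::<AlignedZst>(), 0);
        assert_eq!(std::mem::align_of::<AlignedZst>(), 16);

        // `[u64; 0]` is also zero-sized, yet it inherits `u64`'s alignment
        // (commonly 8), which a backend must still respect when it
        // materializes a place for such a value.
        assert_eq!(std::mem::size_of::<[u64; 0]>(), 0);
        assert_eq!(std::mem::align_of::<[u64; 0]>(), std::mem::align_of::<u64>());
    }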