author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-30 18:31:44 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-30 18:31:44 +0000
commit      c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree        2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler
parent      Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler')
-rw-r--r--compiler/rustc/Cargo.toml1
-rw-r--r--compiler/rustc_abi/src/layout.rs25
-rw-r--r--compiler/rustc_abi/src/lib.rs53
-rw-r--r--compiler/rustc_arena/src/lib.rs7
-rw-r--r--compiler/rustc_ast/src/ast.rs8
-rw-r--r--compiler/rustc_ast/src/attr/mod.rs16
-rw-r--r--compiler/rustc_ast/src/mut_visit.rs5
-rw-r--r--compiler/rustc_ast/src/token.rs2
-rw-r--r--compiler/rustc_ast/src/tokenstream.rs18
-rw-r--r--compiler/rustc_ast/src/util/classify.rs2
-rw-r--r--compiler/rustc_ast/src/visit.rs5
-rw-r--r--compiler/rustc_ast_lowering/messages.ftl4
-rw-r--r--compiler/rustc_ast_lowering/src/expr.rs6
-rw-r--r--compiler/rustc_ast_lowering/src/index.rs30
-rw-r--r--compiler/rustc_ast_lowering/src/item.rs2
-rw-r--r--compiler/rustc_ast_lowering/src/lib.rs95
-rw-r--r--compiler/rustc_ast_passes/messages.ftl22
-rw-r--r--compiler/rustc_ast_passes/src/ast_validation.rs188
-rw-r--r--compiler/rustc_ast_passes/src/errors.rs46
-rw-r--r--compiler/rustc_ast_passes/src/feature_gate.rs25
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state.rs8
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/expr.rs2
-rw-r--r--compiler/rustc_ast_pretty/src/pprust/state/item.rs6
-rw-r--r--compiler/rustc_attr/src/builtin.rs2
-rw-r--r--compiler/rustc_borrowck/messages.ftl11
-rw-r--r--compiler/rustc_borrowck/src/borrow_set.rs2
-rw-r--r--compiler/rustc_borrowck/src/def_use.rs2
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs64
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mod.rs13
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs306
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_errors.rs4
-rw-r--r--compiler/rustc_borrowck/src/diagnostics/region_name.rs18
-rw-r--r--compiler/rustc_borrowck/src/invalidation.rs16
-rw-r--r--compiler/rustc_borrowck/src/lib.rs44
-rw-r--r--compiler/rustc_borrowck/src/nll.rs6
-rw-r--r--compiler/rustc_borrowck/src/places_conflict.rs8
-rw-r--r--compiler/rustc_borrowck/src/prefixes.rs3
-rw-r--r--compiler/rustc_borrowck/src/region_infer/mod.rs9
-rw-r--r--compiler/rustc_borrowck/src/renumber.rs23
-rw-r--r--compiler/rustc_borrowck/src/session_diagnostics.rs7
-rw-r--r--compiler/rustc_borrowck/src/type_check/canonical.rs19
-rw-r--r--compiler/rustc_borrowck/src/type_check/mod.rs90
-rw-r--r--compiler/rustc_borrowck/src/type_check/relate_tys.rs9
-rw-r--r--compiler/rustc_borrowck/src/universal_regions.rs48
-rw-r--r--compiler/rustc_builtin_macros/messages.ftl6
-rw-r--r--compiler/rustc_builtin_macros/src/assert/context.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/bounds.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs19
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs19
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/generic/mod.rs34
-rw-r--r--compiler/rustc_builtin_macros/src/deriving/mod.rs98
-rw-r--r--compiler/rustc_builtin_macros/src/errors.rs35
-rw-r--r--compiler/rustc_builtin_macros/src/format.rs37
-rw-r--r--compiler/rustc_builtin_macros/src/source_util.rs2
-rw-r--r--compiler/rustc_builtin_macros/src/test_harness.rs8
-rw-r--r--compiler/rustc_codegen_cranelift/docs/usage.md2
-rw-r--r--compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml12
-rw-r--r--compiler/rustc_codegen_cranelift/rust-toolchain2
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/filter_profile.rs6
-rwxr-xr-xcompiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh2
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/mod.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs22
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/returning.rs24
-rw-r--r--compiler/rustc_codegen_cranelift/src/base.rs26
-rw-r--r--compiler/rustc_codegen_cranelift/src/common.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/constant.rs97
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs9
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/aot.rs41
-rw-r--r--compiler/rustc_codegen_cranelift/src/inline_asm.rs7
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs248
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs53
-rw-r--r--compiler/rustc_codegen_cranelift/src/unsize.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/value_and_place.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/vtable.rs19
-rw-r--r--compiler/rustc_codegen_gcc/src/abi.rs14
-rw-r--r--compiler/rustc_codegen_gcc/src/builder.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/callee.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/context.rs3
-rw-r--r--compiler/rustc_codegen_gcc/src/debuginfo.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/intrinsic/mod.rs10
-rw-r--r--compiler/rustc_codegen_gcc/src/lib.rs2
-rw-r--r--compiler/rustc_codegen_gcc/src/type_of.rs48
-rw-r--r--compiler/rustc_codegen_llvm/messages.ftl4
-rw-r--r--compiler/rustc_codegen_llvm/src/abi.rs99
-rw-r--r--compiler/rustc_codegen_llvm/src/back/archive.rs2
-rw-r--r--compiler/rustc_codegen_llvm/src/back/lto.rs52
-rw-r--r--compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs103
-rw-r--r--compiler/rustc_codegen_llvm/src/back/write.rs146
-rw-r--r--compiler/rustc_codegen_llvm/src/callee.rs3
-rw-r--r--compiler/rustc_codegen_llvm/src/context.rs7
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs46
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs248
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs200
-rw-r--r--compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs169
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs38
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs4
-rw-r--r--compiler/rustc_codegen_llvm/src/debuginfo/mod.rs15
-rw-r--r--compiler/rustc_codegen_llvm/src/errors.rs10
-rw-r--r--compiler/rustc_codegen_llvm/src/intrinsic.rs61
-rw-r--r--compiler/rustc_codegen_llvm/src/lib.rs29
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm/ffi.rs26
-rw-r--r--compiler/rustc_codegen_llvm/src/llvm_util.rs6
-rw-r--r--compiler/rustc_codegen_llvm/src/type_of.rs38
-rw-r--r--compiler/rustc_codegen_ssa/messages.ftl10
-rw-r--r--compiler/rustc_codegen_ssa/src/back/link.rs8
-rw-r--r--compiler/rustc_codegen_ssa/src/back/metadata.rs29
-rw-r--r--compiler/rustc_codegen_ssa/src/back/symbol_export.rs16
-rw-r--r--compiler/rustc_codegen_ssa/src/back/write.rs16
-rw-r--r--compiler/rustc_codegen_ssa/src/base.rs39
-rw-r--r--compiler/rustc_codegen_ssa/src/codegen_attrs.rs21
-rw-r--r--compiler/rustc_codegen_ssa/src/common.rs6
-rw-r--r--compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs47
-rw-r--r--compiler/rustc_codegen_ssa/src/errors.rs35
-rw-r--r--compiler/rustc_codegen_ssa/src/lib.rs7
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/analyze.rs6
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/block.rs211
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/constant.rs68
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/debuginfo.rs176
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/intrinsic.rs2
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/mod.rs43
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/operand.rs50
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/place.rs8
-rw-r--r--compiler/rustc_codegen_ssa/src/mir/rvalue.rs1
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/backend.rs3
-rw-r--r--compiler/rustc_codegen_ssa/src/traits/debuginfo.rs2
-rw-r--r--compiler/rustc_const_eval/messages.ftl5
-rw-r--r--compiler/rustc_const_eval/src/const_eval/error.rs39
-rw-r--r--compiler/rustc_const_eval/src/const_eval/eval_queries.rs128
-rw-r--r--compiler/rustc_const_eval/src/const_eval/machine.rs15
-rw-r--r--compiler/rustc_const_eval/src/const_eval/mod.rs17
-rw-r--r--compiler/rustc_const_eval/src/const_eval/valtrees.rs119
-rw-r--r--compiler/rustc_const_eval/src/errors.rs37
-rw-r--r--compiler/rustc_const_eval/src/interpret/cast.rs138
-rw-r--r--compiler/rustc_const_eval/src/interpret/discriminant.rs20
-rw-r--r--compiler/rustc_const_eval/src/interpret/eval_context.rs277
-rw-r--r--compiler/rustc_const_eval/src/interpret/intern.rs25
-rw-r--r--compiler/rustc_const_eval/src/interpret/intrinsics.rs45
-rw-r--r--compiler/rustc_const_eval/src/interpret/machine.rs60
-rw-r--r--compiler/rustc_const_eval/src/interpret/memory.rs7
-rw-r--r--compiler/rustc_const_eval/src/interpret/mod.rs11
-rw-r--r--compiler/rustc_const_eval/src/interpret/operand.rs338
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs147
-rw-r--r--compiler/rustc_const_eval/src/interpret/place.rs384
-rw-r--r--compiler/rustc_const_eval/src/interpret/projection.rs89
-rw-r--r--compiler/rustc_const_eval/src/interpret/step.rs8
-rw-r--r--compiler/rustc_const_eval/src/interpret/terminator.rs445
-rw-r--r--compiler/rustc_const_eval/src/interpret/traits.rs2
-rw-r--r--compiler/rustc_const_eval/src/interpret/util.rs5
-rw-r--r--compiler/rustc_const_eval/src/interpret/validity.rs20
-rw-r--r--compiler/rustc_const_eval/src/interpret/visitor.rs5
-rw-r--r--compiler/rustc_const_eval/src/lib.rs7
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/check.rs13
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs4
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs13
-rw-r--r--compiler/rustc_const_eval/src/transform/check_consts/resolver.rs2
-rw-r--r--compiler/rustc_const_eval/src/transform/promote_consts.rs24
-rw-r--r--compiler/rustc_const_eval/src/transform/validate.rs158
-rw-r--r--compiler/rustc_const_eval/src/util/alignment.rs3
-rw-r--r--compiler/rustc_const_eval/src/util/check_validity_requirement.rs2
-rw-r--r--compiler/rustc_const_eval/src/util/compare_types.rs14
-rw-r--r--compiler/rustc_const_eval/src/util/mod.rs4
-rw-r--r--compiler/rustc_const_eval/src/util/type_name.rs3
-rw-r--r--compiler/rustc_data_structures/src/flock/unix.rs24
-rw-r--r--compiler/rustc_data_structures/src/graph/dominators/mod.rs6
-rw-r--r--compiler/rustc_data_structures/src/graph/implementation/mod.rs23
-rw-r--r--compiler/rustc_data_structures/src/lib.rs24
-rw-r--r--compiler/rustc_data_structures/src/marker.rs2
-rw-r--r--compiler/rustc_data_structures/src/memmap.rs7
-rw-r--r--compiler/rustc_data_structures/src/profiling.rs4
-rw-r--r--compiler/rustc_data_structures/src/sharded.rs159
-rw-r--r--compiler/rustc_data_structures/src/small_c_str.rs6
-rw-r--r--compiler/rustc_data_structures/src/sync.rs338
-rw-r--r--compiler/rustc_data_structures/src/sync/freeze.rs200
-rw-r--r--compiler/rustc_data_structures/src/sync/lock.rs275
-rw-r--r--compiler/rustc_data_structures/src/sync/parallel.rs188
-rw-r--r--compiler/rustc_data_structures/src/sync/worker_local.rs20
-rw-r--r--compiler/rustc_driver_impl/src/lib.rs149
-rw-r--r--compiler/rustc_driver_impl/src/signal_handler.rs142
-rw-r--r--compiler/rustc_error_codes/src/error_codes.rs4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0038.md7
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0094.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0191.md6
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0211.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0401.md6
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0445.md8
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0446.md46
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0647.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0691.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0698.md4
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0760.md2
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0788.md16
-rw-r--r--compiler/rustc_error_messages/src/lib.rs2
-rw-r--r--compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs5
-rw-r--r--compiler/rustc_errors/src/diagnostic.rs16
-rw-r--r--compiler/rustc_errors/src/diagnostic_impls.rs2
-rw-r--r--compiler/rustc_errors/src/emitter.rs43
-rw-r--r--compiler/rustc_errors/src/json.rs24
-rw-r--r--compiler/rustc_errors/src/lib.rs62
-rw-r--r--compiler/rustc_expand/src/expand.rs2
-rw-r--r--compiler/rustc_expand/src/lib.rs2
-rw-r--r--compiler/rustc_feature/src/accepted.rs4
-rw-r--r--compiler/rustc_feature/src/active.rs22
-rw-r--r--compiler/rustc_feature/src/builtin_attrs.rs13
-rw-r--r--compiler/rustc_feature/src/removed.rs3
-rw-r--r--compiler/rustc_hir/src/def.rs10
-rw-r--r--compiler/rustc_hir/src/definitions.rs3
-rw-r--r--compiler/rustc_hir/src/hir.rs38
-rw-r--r--compiler/rustc_hir/src/intravisit.rs6
-rw-r--r--compiler/rustc_hir/src/lang_items.rs1
-rw-r--r--compiler/rustc_hir/src/lib.rs2
-rw-r--r--compiler/rustc_hir/src/target.rs2
-rw-r--r--compiler/rustc_hir_analysis/messages.ftl103
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/bounds.rs2
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/errors.rs63
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/generics.rs17
-rw-r--r--compiler/rustc_hir_analysis/src/astconv/mod.rs99
-rw-r--r--compiler/rustc_hir_analysis/src/check/check.rs265
-rw-r--r--compiler/rustc_hir_analysis/src/check/compare_impl_item.rs121
-rw-r--r--compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs332
-rw-r--r--compiler/rustc_hir_analysis/src/check/entry.rs58
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsic.rs65
-rw-r--r--compiler/rustc_hir_analysis/src/check/intrinsicck.rs53
-rw-r--r--compiler/rustc_hir_analysis/src/check/mod.rs153
-rw-r--r--compiler/rustc_hir_analysis/src/check/region.rs6
-rw-r--r--compiler/rustc_hir_analysis/src/check/wfcheck.rs126
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/builtin.rs212
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs94
-rw-r--r--compiler/rustc_hir_analysis/src/coherence/orphan.rs1
-rw-r--r--compiler/rustc_hir_analysis/src/collect.rs21
-rw-r--r--compiler/rustc_hir_analysis/src/collect/generics_of.rs5
-rw-r--r--compiler/rustc_hir_analysis/src/collect/predicates_of.rs34
-rw-r--r--compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs273
-rw-r--r--compiler/rustc_hir_analysis/src/collect/type_of.rs198
-rw-r--r--compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs9
-rw-r--r--compiler/rustc_hir_analysis/src/errors.rs228
-rw-r--r--compiler/rustc_hir_analysis/src/lib.rs32
-rw-r--r--compiler/rustc_hir_analysis/src/variance/constraints.rs16
-rw-r--r--compiler/rustc_hir_analysis/src/variance/mod.rs16
-rw-r--r--compiler/rustc_hir_analysis/src/variance/terms.rs6
-rw-r--r--compiler/rustc_hir_analysis/src/variance/test.rs15
-rw-r--r--compiler/rustc_hir_pretty/src/lib.rs2
-rw-r--r--compiler/rustc_hir_typeck/messages.ftl66
-rw-r--r--compiler/rustc_hir_typeck/src/_match.rs46
-rw-r--r--compiler/rustc_hir_typeck/src/callee.rs69
-rw-r--r--compiler/rustc_hir_typeck/src/cast.rs274
-rw-r--r--compiler/rustc_hir_typeck/src/check.rs249
-rw-r--r--compiler/rustc_hir_typeck/src/closure.rs2
-rw-r--r--compiler/rustc_hir_typeck/src/demand.rs131
-rw-r--r--compiler/rustc_hir_typeck/src/errors.rs269
-rw-r--r--compiler/rustc_hir_typeck/src/expr.rs44
-rw-r--r--compiler/rustc_hir_typeck/src/expr_use_visitor.rs10
-rw-r--r--compiler/rustc_hir_typeck/src/fallback.rs64
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs80
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs220
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs24
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs23
-rw-r--r--compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs8
-rw-r--r--compiler/rustc_hir_typeck/src/gather_locals.rs2
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs601
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs92
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs96
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs306
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs242
-rw-r--r--compiler/rustc_hir_typeck/src/generator_interior/mod.rs714
-rw-r--r--compiler/rustc_hir_typeck/src/lib.rs7
-rw-r--r--compiler/rustc_hir_typeck/src/mem_categorization.rs5
-rw-r--r--compiler/rustc_hir_typeck/src/method/mod.rs5
-rw-r--r--compiler/rustc_hir_typeck/src/method/suggest.rs3
-rw-r--r--compiler/rustc_hir_typeck/src/upvar.rs61
-rw-r--r--compiler/rustc_hir_typeck/src/writeback.rs8
-rw-r--r--compiler/rustc_incremental/src/assert_dep_graph.rs4
-rw-r--r--compiler/rustc_incremental/src/lib.rs2
-rw-r--r--compiler/rustc_incremental/src/persist/load.rs132
-rw-r--r--compiler/rustc_incremental/src/persist/mod.rs3
-rw-r--r--compiler/rustc_incremental/src/persist/save.rs28
-rw-r--r--compiler/rustc_index/src/lib.rs14
-rw-r--r--compiler/rustc_infer/messages.ftl6
-rw-r--r--compiler/rustc_infer/src/errors/mod.rs21
-rw-r--r--compiler/rustc_infer/src/errors/note_and_explain.rs7
-rw-r--r--compiler/rustc_infer/src/infer/at.rs23
-rw-r--r--compiler/rustc_infer/src/infer/canonical/canonicalizer.rs17
-rw-r--r--compiler/rustc_infer/src/infer/canonical/mod.rs6
-rw-r--r--compiler/rustc_infer/src/infer/combine.rs69
-rw-r--r--compiler/rustc_infer/src/infer/equate.rs20
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/mod.rs36
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs39
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs2
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs2
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_relation.rs6
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs14
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs20
-rw-r--r--compiler/rustc_infer/src/infer/free_regions.rs9
-rw-r--r--compiler/rustc_infer/src/infer/freshen.rs15
-rw-r--r--compiler/rustc_infer/src/infer/generalize.rs1
-rw-r--r--compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs91
-rw-r--r--compiler/rustc_infer/src/infer/mod.rs92
-rw-r--r--compiler/rustc_infer/src/infer/outlives/components.rs2
-rw-r--r--compiler/rustc_infer/src/infer/sub.rs19
-rw-r--r--compiler/rustc_infer/src/infer/undo_log.rs3
-rw-r--r--compiler/rustc_interface/src/callbacks.rs43
-rw-r--r--compiler/rustc_interface/src/interface.rs13
-rw-r--r--compiler/rustc_interface/src/lib.rs2
-rw-r--r--compiler/rustc_interface/src/passes.rs51
-rw-r--r--compiler/rustc_interface/src/queries.rs59
-rw-r--r--compiler/rustc_interface/src/tests.rs6
-rw-r--r--compiler/rustc_interface/src/util.rs58
-rw-r--r--compiler/rustc_lint/messages.ftl18
-rw-r--r--compiler/rustc_lint/src/array_into_iter.rs2
-rw-r--r--compiler/rustc_lint/src/builtin.rs29
-rw-r--r--compiler/rustc_lint/src/context.rs170
-rw-r--r--compiler/rustc_lint/src/deref_into_dyn_supertrait.rs2
-rw-r--r--compiler/rustc_lint/src/early.rs1
-rw-r--r--compiler/rustc_lint/src/errors.rs52
-rw-r--r--compiler/rustc_lint/src/foreign_modules.rs7
-rw-r--r--compiler/rustc_lint/src/invalid_from_utf8.rs25
-rw-r--r--compiler/rustc_lint/src/late.rs29
-rw-r--r--compiler/rustc_lint/src/levels.rs136
-rw-r--r--compiler/rustc_lint/src/lib.rs17
-rw-r--r--compiler/rustc_lint/src/lints.rs88
-rw-r--r--compiler/rustc_lint/src/noop_method_call.rs6
-rw-r--r--compiler/rustc_lint/src/passes.rs155
-rw-r--r--compiler/rustc_lint/src/ptr_nulls.rs52
-rw-r--r--compiler/rustc_lint/src/reference_casting.rs187
-rw-r--r--compiler/rustc_lint/src/tests.rs2
-rw-r--r--compiler/rustc_lint/src/traits.rs2
-rw-r--r--compiler/rustc_lint/src/types.rs18
-rw-r--r--compiler/rustc_lint/src/unused.rs41
-rw-r--r--compiler/rustc_lint_defs/src/builtin.rs264
-rw-r--r--compiler/rustc_lint_defs/src/lib.rs62
-rw-r--r--compiler/rustc_llvm/build.rs5
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp5
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp129
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp16
-rw-r--r--compiler/rustc_macros/src/lib.rs2
-rw-r--r--compiler/rustc_metadata/messages.ftl3
-rw-r--r--compiler/rustc_metadata/src/creader.rs10
-rw-r--r--compiler/rustc_metadata/src/fs.rs4
-rw-r--r--compiler/rustc_metadata/src/lib.rs4
-rw-r--r--compiler/rustc_metadata/src/locator.rs3
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder.rs267
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs20
-rw-r--r--compiler/rustc_metadata/src/rmeta/encoder.rs214
-rw-r--r--compiler/rustc_metadata/src/rmeta/mod.rs22
-rw-r--r--compiler/rustc_metadata/src/rmeta/table.rs157
-rw-r--r--compiler/rustc_middle/messages.ftl2
-rw-r--r--compiler/rustc_middle/src/dep_graph/dep_node.rs53
-rw-r--r--compiler/rustc_middle/src/dep_graph/mod.rs50
-rw-r--r--compiler/rustc_middle/src/error.rs7
-rw-r--r--compiler/rustc_middle/src/hir/map/mod.rs29
-rw-r--r--compiler/rustc_middle/src/hooks/mod.rs65
-rw-r--r--compiler/rustc_middle/src/infer/canonical.rs67
-rw-r--r--compiler/rustc_middle/src/infer/mod.rs2
-rw-r--r--compiler/rustc_middle/src/infer/unify_key.rs50
-rw-r--r--compiler/rustc_middle/src/lib.rs3
-rw-r--r--compiler/rustc_middle/src/lint.rs12
-rw-r--r--compiler/rustc_middle/src/macros.rs4
-rw-r--r--compiler/rustc_middle/src/middle/codegen_fn_attrs.rs2
-rw-r--r--compiler/rustc_middle/src/mir/basic_blocks.rs21
-rw-r--r--compiler/rustc_middle/src/mir/consts.rs522
-rw-r--r--compiler/rustc_middle/src/mir/coverage.rs10
-rw-r--r--compiler/rustc_middle/src/mir/interpret/error.rs91
-rw-r--r--compiler/rustc_middle/src/mir/interpret/mod.rs19
-rw-r--r--compiler/rustc_middle/src/mir/interpret/pointer.rs2
-rw-r--r--compiler/rustc_middle/src/mir/interpret/queries.rs18
-rw-r--r--compiler/rustc_middle/src/mir/interpret/value.rs134
-rw-r--r--compiler/rustc_middle/src/mir/mod.rs1612
-rw-r--r--compiler/rustc_middle/src/mir/mono.rs8
-rw-r--r--compiler/rustc_middle/src/mir/patch.rs27
-rw-r--r--compiler/rustc_middle/src/mir/pretty.rs1251
-rw-r--r--compiler/rustc_middle/src/mir/query.rs8
-rw-r--r--compiler/rustc_middle/src/mir/spanview.rs45
-rw-r--r--compiler/rustc_middle/src/mir/statement.rs464
-rw-r--r--compiler/rustc_middle/src/mir/syntax.rs118
-rw-r--r--compiler/rustc_middle/src/mir/tcx.rs13
-rw-r--r--compiler/rustc_middle/src/mir/terminator.rs421
-rw-r--r--compiler/rustc_middle/src/mir/traversal.rs110
-rw-r--r--compiler/rustc_middle/src/mir/type_foldable.rs2
-rw-r--r--compiler/rustc_middle/src/mir/visit.rs58
-rw-r--r--compiler/rustc_middle/src/query/erase.rs29
-rw-r--r--compiler/rustc_middle/src/query/keys.rs7
-rw-r--r--compiler/rustc_middle/src/query/mod.rs49
-rw-r--r--compiler/rustc_middle/src/query/on_disk_cache.rs9
-rw-r--r--compiler/rustc_middle/src/query/plumbing.rs14
-rw-r--r--compiler/rustc_middle/src/thir.rs26
-rw-r--r--compiler/rustc_middle/src/thir/visit.rs6
-rw-r--r--compiler/rustc_middle/src/traits/mod.rs42
-rw-r--r--compiler/rustc_middle/src/traits/query.rs21
-rw-r--r--compiler/rustc_middle/src/traits/select.rs2
-rw-r--r--compiler/rustc_middle/src/traits/solve.rs63
-rw-r--r--compiler/rustc_middle/src/traits/solve/inspect.rs137
-rw-r--r--compiler/rustc_middle/src/traits/solve/inspect/format.rs118
-rw-r--r--compiler/rustc_middle/src/ty/abstract_const.rs2
-rw-r--r--compiler/rustc_middle/src/ty/adjustment.rs8
-rw-r--r--compiler/rustc_middle/src/ty/adt.rs6
-rw-r--r--compiler/rustc_middle/src/ty/binding.rs2
-rw-r--r--compiler/rustc_middle/src/ty/codec.rs1
-rw-r--r--compiler/rustc_middle/src/ty/consts.rs254
-rw-r--r--compiler/rustc_middle/src/ty/consts/int.rs5
-rw-r--r--compiler/rustc_middle/src/ty/consts/kind.rs45
-rw-r--r--compiler/rustc_middle/src/ty/context.rs111
-rw-r--r--compiler/rustc_middle/src/ty/diagnostics.rs10
-rw-r--r--compiler/rustc_middle/src/ty/erase_regions.rs4
-rw-r--r--compiler/rustc_middle/src/ty/error.rs9
-rw-r--r--compiler/rustc_middle/src/ty/fast_reject.rs13
-rw-r--r--compiler/rustc_middle/src/ty/flags.rs24
-rw-r--r--compiler/rustc_middle/src/ty/fold.rs2
-rw-r--r--compiler/rustc_middle/src/ty/generic_args.rs20
-rw-r--r--compiler/rustc_middle/src/ty/generics.rs12
-rw-r--r--compiler/rustc_middle/src/ty/instance.rs18
-rw-r--r--compiler/rustc_middle/src/ty/layout.rs15
-rw-r--r--compiler/rustc_middle/src/ty/mod.rs97
-rw-r--r--compiler/rustc_middle/src/ty/normalize_erasing_regions.rs24
-rw-r--r--compiler/rustc_middle/src/ty/opaque_types.rs4
-rw-r--r--compiler/rustc_middle/src/ty/parameterized.rs2
-rw-r--r--compiler/rustc_middle/src/ty/print/mod.rs3
-rw-r--r--compiler/rustc_middle/src/ty/print/pretty.rs110
-rw-r--r--compiler/rustc_middle/src/ty/relate.rs14
-rw-r--r--compiler/rustc_middle/src/ty/structural_impls.rs170
-rw-r--r--compiler/rustc_middle/src/ty/sty.rs84
-rw-r--r--compiler/rustc_middle/src/ty/trait_def.rs4
-rw-r--r--compiler/rustc_middle/src/ty/typeck_results.rs89
-rw-r--r--compiler/rustc_middle/src/ty/util.rs34
-rw-r--r--compiler/rustc_middle/src/ty/visit.rs36
-rw-r--r--compiler/rustc_middle/src/ty/vtable.rs6
-rw-r--r--compiler/rustc_middle/src/ty/walk.rs5
-rw-r--r--compiler/rustc_middle/src/util/find_self_call.rs4
-rw-r--r--compiler/rustc_middle/src/util/mod.rs24
-rw-r--r--compiler/rustc_middle/src/values.rs56
-rw-r--r--compiler/rustc_mir_build/messages.ftl3
-rw-r--r--compiler/rustc_mir_build/src/build/cfg.rs6
-rw-r--r--compiler/rustc_mir_build/src/build/custom/parse.rs58
-rw-r--r--compiler/rustc_mir_build/src/build/custom/parse/instruction.rs14
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_constant.rs66
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_place.rs5
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_rvalue.rs10
-rw-r--r--compiler/rustc_mir_build/src/build/expr/into.rs90
-rw-r--r--compiler/rustc_mir_build/src/build/matches/mod.rs56
-rw-r--r--compiler/rustc_mir_build/src/build/matches/test.rs29
-rw-r--r--compiler/rustc_mir_build/src/build/misc.rs14
-rw-r--r--compiler/rustc_mir_build/src/build/mod.rs10
-rw-r--r--compiler/rustc_mir_build/src/build/scope.rs16
-rw-r--r--compiler/rustc_mir_build/src/check_unsafety.rs6
-rw-r--r--compiler/rustc_mir_build/src/errors.rs6
-rw-r--r--compiler/rustc_mir_build/src/lints.rs4
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/expr.rs16
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/check_match.rs58
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs87
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs18
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/mod.rs64
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/usefulness.rs49
-rw-r--r--compiler/rustc_mir_dataflow/src/elaborate_drops.rs13
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/direction.rs8
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/graphviz.rs12
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/visitor.rs12
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs13
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/liveness.rs2
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs8
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs1
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/builder.rs150
-rw-r--r--compiler/rustc_mir_dataflow/src/rustc_peek.rs2
-rw-r--r--compiler/rustc_mir_dataflow/src/value_analysis.rs146
-rw-r--r--compiler/rustc_mir_transform/messages.ftl2
-rw-r--r--compiler/rustc_mir_transform/src/abort_unwinding_calls.rs2
-rw-r--r--compiler/rustc_mir_transform/src/add_call_guards.rs6
-rw-r--r--compiler/rustc_mir_transform/src/add_subtyping_projections.rs70
-rw-r--r--compiler/rustc_mir_transform/src/check_alignment.rs16
-rw-r--r--compiler/rustc_mir_transform/src/check_unsafety.rs12
-rw-r--r--compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs6
-rw-r--r--compiler/rustc_mir_transform/src/const_debuginfo.rs4
-rw-r--r--compiler/rustc_mir_transform/src/const_goto.rs4
-rw-r--r--compiler/rustc_mir_transform/src/const_prop.rs204
-rw-r--r--compiler/rustc_mir_transform/src/const_prop_lint.rs72
-rw-r--r--compiler/rustc_mir_transform/src/copy_prop.rs2
-rw-r--r--compiler/rustc_mir_transform/src/coverage/counters.rs101
-rw-r--r--compiler/rustc_mir_transform/src/coverage/debug.rs802
-rw-r--r--compiler/rustc_mir_transform/src/coverage/graph.rs19
-rw-r--r--compiler/rustc_mir_transform/src/coverage/mod.rs158
-rw-r--r--compiler/rustc_mir_transform/src/coverage/query.rs123
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans.rs101
-rw-r--r--compiler/rustc_mir_transform/src/dataflow_const_prop.rs509
-rw-r--r--compiler/rustc_mir_transform/src/dead_store_elimination.rs6
-rw-r--r--compiler/rustc_mir_transform/src/deduplicate_blocks.rs8
-rw-r--r--compiler/rustc_mir_transform/src/dest_prop.rs4
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_drops.rs19
-rw-r--r--compiler/rustc_mir_transform/src/errors.rs41
-rw-r--r--compiler/rustc_mir_transform/src/generator.rs145
-rw-r--r--compiler/rustc_mir_transform/src/gvn.rs539
-rw-r--r--compiler/rustc_mir_transform/src/inline.rs83
-rw-r--r--compiler/rustc_mir_transform/src/inline/cycle.rs2
-rw-r--r--compiler/rustc_mir_transform/src/instsimplify.rs6
-rw-r--r--compiler/rustc_mir_transform/src/large_enums.rs16
-rw-r--r--compiler/rustc_mir_transform/src/lib.rs28
-rw-r--r--compiler/rustc_mir_transform/src/lower_intrinsics.rs39
-rw-r--r--compiler/rustc_mir_transform/src/match_branches.rs12
-rw-r--r--compiler/rustc_mir_transform/src/normalize_array_len.rs4
-rw-r--r--compiler/rustc_mir_transform/src/pass_manager.rs11
-rw-r--r--compiler/rustc_mir_transform/src/ref_prop.rs2
-rw-r--r--compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs6
-rw-r--r--compiler/rustc_mir_transform/src/remove_zsts.rs12
-rw-r--r--compiler/rustc_mir_transform/src/required_consts.rs18
-rw-r--r--compiler/rustc_mir_transform/src/reveal_all.rs40
-rw-r--r--compiler/rustc_mir_transform/src/separate_const_switch.rs8
-rw-r--r--compiler/rustc_mir_transform/src/shim.rs28
-rw-r--r--compiler/rustc_mir_transform/src/simplify.rs43
-rw-r--r--compiler/rustc_mir_transform/src/simplify_branches.rs4
-rw-r--r--compiler/rustc_mir_transform/src/simplify_comparison_integral.rs4
-rw-r--r--compiler/rustc_mir_transform/src/sroa.rs93
-rw-r--r--compiler/rustc_mir_transform/src/ssa.rs52
-rw-r--r--compiler/rustc_mir_transform/src/unreachable_prop.rs6
-rw-r--r--compiler/rustc_monomorphize/messages.ftl4
-rw-r--r--compiler/rustc_monomorphize/src/collector.rs287
-rw-r--r--compiler/rustc_monomorphize/src/errors.rs10
-rw-r--r--compiler/rustc_monomorphize/src/partitioning.rs6
-rw-r--r--compiler/rustc_monomorphize/src/polymorphize.rs18
-rw-r--r--compiler/rustc_monomorphize/src/util.rs4
-rw-r--r--compiler/rustc_parse/messages.ftl5
-rw-r--r--compiler/rustc_parse/src/errors.rs18
-rw-r--r--compiler/rustc_parse/src/lib.rs4
-rw-r--r--compiler/rustc_parse/src/parser/attr_wrapper.rs2
-rw-r--r--compiler/rustc_parse/src/parser/diagnostics.rs7
-rw-r--r--compiler/rustc_parse/src/parser/expr.rs381
-rw-r--r--compiler/rustc_parse/src/parser/item.rs36
-rw-r--r--compiler/rustc_parse/src/parser/mod.rs1
-rw-r--r--compiler/rustc_parse/src/parser/ty.rs84
-rw-r--r--compiler/rustc_parse_format/Cargo.toml2
-rw-r--r--compiler/rustc_parse_format/src/lib.rs56
-rw-r--r--compiler/rustc_passes/messages.ftl73
-rw-r--r--compiler/rustc_passes/src/abi_test.rs197
-rw-r--r--compiler/rustc_passes/src/check_attr.rs122
-rw-r--r--compiler/rustc_passes/src/check_const.rs4
-rw-r--r--compiler/rustc_passes/src/dead.rs4
-rw-r--r--compiler/rustc_passes/src/errors.rs92
-rw-r--r--compiler/rustc_passes/src/hir_stats.rs2
-rw-r--r--compiler/rustc_passes/src/lang_items.rs12
-rw-r--r--compiler/rustc_passes/src/layout_test.rs97
-rw-r--r--compiler/rustc_passes/src/lib.rs1
-rw-r--r--compiler/rustc_passes/src/reachable.rs4
-rw-r--r--compiler/rustc_privacy/messages.ftl5
-rw-r--r--compiler/rustc_privacy/src/errors.rs23
-rw-r--r--compiler/rustc_privacy/src/lib.rs439
-rw-r--r--compiler/rustc_query_impl/Cargo.toml1
-rw-r--r--compiler/rustc_query_impl/src/lib.rs13
-rw-r--r--compiler/rustc_query_impl/src/plumbing.rs44
-rw-r--r--compiler/rustc_query_system/src/dep_graph/debug.rs14
-rw-r--r--compiler/rustc_query_system/src/dep_graph/dep_node.rs92
-rw-r--r--compiler/rustc_query_system/src/dep_graph/edges.rs73
-rw-r--r--compiler/rustc_query_system/src/dep_graph/graph.rs202
-rw-r--r--compiler/rustc_query_system/src/dep_graph/mod.rs80
-rw-r--r--compiler/rustc_query_system/src/dep_graph/query.rs22
-rw-r--r--compiler/rustc_query_system/src/dep_graph/serialized.rs412
-rw-r--r--compiler/rustc_query_system/src/ich/impls_syntax.rs49
-rw-r--r--compiler/rustc_query_system/src/lib.rs1
-rw-r--r--compiler/rustc_query_system/src/query/caches.rs26
-rw-r--r--compiler/rustc_query_system/src/query/config.rs12
-rw-r--r--compiler/rustc_query_system/src/query/job.rs122
-rw-r--r--compiler/rustc_query_system/src/query/mod.rs10
-rw-r--r--compiler/rustc_query_system/src/query/plumbing.rs167
-rw-r--r--compiler/rustc_query_system/src/values.rs12
-rw-r--r--compiler/rustc_resolve/messages.ftl33
-rw-r--r--compiler/rustc_resolve/src/build_reduced_graph.rs28
-rw-r--r--compiler/rustc_resolve/src/diagnostics.rs61
-rw-r--r--compiler/rustc_resolve/src/errors.rs44
-rw-r--r--compiler/rustc_resolve/src/ident.rs81
-rw-r--r--compiler/rustc_resolve/src/imports.rs15
-rw-r--r--compiler/rustc_resolve/src/late.rs130
-rw-r--r--compiler/rustc_resolve/src/late/diagnostics.rs142
-rw-r--r--compiler/rustc_resolve/src/lib.rs137
-rw-r--r--compiler/rustc_resolve/src/macros.rs39
-rw-r--r--compiler/rustc_resolve/src/rustdoc.rs89
-rw-r--r--compiler/rustc_serialize/src/leb128.rs20
-rw-r--r--compiler/rustc_serialize/src/lib.rs3
-rw-r--r--compiler/rustc_serialize/src/opaque.rs277
-rw-r--r--compiler/rustc_serialize/tests/leb128.rs14
-rw-r--r--compiler/rustc_session/messages.ftl3
-rw-r--r--compiler/rustc_session/src/config.rs97
-rw-r--r--compiler/rustc_session/src/cstore.rs6
-rw-r--r--compiler/rustc_session/src/errors.rs6
-rw-r--r--compiler/rustc_session/src/lib.rs2
-rw-r--r--compiler/rustc_session/src/options.rs63
-rw-r--r--compiler/rustc_session/src/output.rs19
-rw-r--r--compiler/rustc_session/src/parse.rs17
-rw-r--r--compiler/rustc_session/src/session.rs37
-rw-r--r--compiler/rustc_session/src/utils.rs56
-rw-r--r--compiler/rustc_smir/Cargo.toml20
-rw-r--r--compiler/rustc_smir/src/lib.rs16
-rw-r--r--compiler/rustc_smir/src/rustc_internal/mod.rs190
-rw-r--r--compiler/rustc_smir/src/rustc_smir/alloc.rs123
-rw-r--r--compiler/rustc_smir/src/rustc_smir/mod.rs644
-rw-r--r--compiler/rustc_smir/src/stable_mir/ty.rs463
-rw-r--r--compiler/rustc_span/src/analyze_source_file.rs48
-rw-r--r--compiler/rustc_span/src/analyze_source_file/tests.rs29
-rw-r--r--compiler/rustc_span/src/caching_source_map_view.rs15
-rw-r--r--compiler/rustc_span/src/hygiene.rs126
-rw-r--r--compiler/rustc_span/src/lib.rs512
-rw-r--r--compiler/rustc_span/src/source_map.rs217
-rw-r--r--compiler/rustc_span/src/source_map/tests.rs38
-rw-r--r--compiler/rustc_span/src/span_encoding.rs255
-rw-r--r--compiler/rustc_span/src/symbol.rs24
-rw-r--r--compiler/rustc_span/src/tests.rs27
-rw-r--r--compiler/rustc_symbol_mangling/src/legacy.rs2
-rw-r--r--compiler/rustc_symbol_mangling/src/lib.rs9
-rw-r--r--compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs29
-rw-r--r--compiler/rustc_symbol_mangling/src/v0.rs9
-rw-r--r--compiler/rustc_target/src/abi/call/loongarch.rs11
-rw-r--r--compiler/rustc_target/src/abi/call/mod.rs162
-rw-r--r--compiler/rustc_target/src/abi/call/riscv.rs11
-rw-r--r--compiler/rustc_target/src/abi/call/wasm.rs4
-rw-r--r--compiler/rustc_target/src/abi/call/x86.rs6
-rw-r--r--compiler/rustc_target/src/abi/mod.rs42
-rw-r--r--compiler/rustc_target/src/json.rs4
-rw-r--r--compiler/rustc_target/src/lib.rs2
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs3
-rw-r--r--compiler/rustc_target/src/spec/abi.rs46
-rw-r--r--compiler/rustc_target/src/spec/apple_base.rs99
-rw-r--r--compiler/rustc_target/src/spec/armv7_apple_ios.rs21
-rw-r--r--compiler/rustc_target/src/spec/armv7s_apple_ios.rs4
-rw-r--r--compiler/rustc_target/src/spec/hurd_base.rs15
-rw-r--r--compiler/rustc_target/src/spec/hurd_gnu_base.rs5
-rw-r--r--compiler/rustc_target/src/spec/i686_pc_windows_gnullvm.rs26
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_hurd_gnu.rs19
-rw-r--r--compiler/rustc_target/src/spec/mod.rs15
-rw-r--r--compiler/rustc_target/src/spec/riscv64_linux_android.rs2
-rw-r--r--compiler/rustc_target/src/spec/uefi_msvc_base.rs1
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_darwin.rs2
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs3
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs3
-rw-r--r--compiler/rustc_trait_selection/messages.ftl5
-rw-r--r--compiler/rustc_trait_selection/src/solve/alias_relate.rs6
-rw-r--r--compiler/rustc_trait_selection/src/solve/assembly/mod.rs78
-rw-r--r--compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs26
-rw-r--r--compiler/rustc_trait_selection/src/solve/canonicalize.rs16
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt.rs70
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs108
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs42
-rw-r--r--compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs4
-rw-r--r--compiler/rustc_trait_selection/src/solve/inspect.rs428
-rw-r--r--compiler/rustc_trait_selection/src/solve/inspect/analyse.rs235
-rw-r--r--compiler/rustc_trait_selection/src/solve/inspect/build.rs522
-rw-r--r--compiler/rustc_trait_selection/src/solve/inspect/mod.rs7
-rw-r--r--compiler/rustc_trait_selection/src/solve/mod.rs16
-rw-r--r--compiler/rustc_trait_selection/src/solve/project_goals.rs43
-rw-r--r--compiler/rustc_trait_selection/src/solve/search_graph/cache.rs102
-rw-r--r--compiler/rustc_trait_selection/src/solve/search_graph/mod.rs164
-rw-r--r--compiler/rustc_trait_selection/src/solve/trait_goals.rs21
-rw-r--r--compiler/rustc_trait_selection/src/traits/auto_trait.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/coherence.rs250
-rw-r--r--compiler/rustc_trait_selection/src/traits/const_evaluatable.rs8
-rw-r--r--compiler/rustc_trait_selection/src/traits/engine.rs15
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs42
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs117
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs503
-rw-r--r--compiler/rustc_trait_selection/src/traits/fulfill.rs44
-rw-r--r--compiler/rustc_trait_selection/src/traits/mod.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/project.rs35
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs39
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/normalize.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs13
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/confirmation.rs10
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/mod.rs83
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/mod.rs12
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_match.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_normalize.rs12
-rw-r--r--compiler/rustc_trait_selection/src/traits/vtable.rs2
-rw-r--r--compiler/rustc_trait_selection/src/traits/wf.rs1
-rw-r--r--compiler/rustc_traits/Cargo.toml3
-rw-r--r--compiler/rustc_traits/src/dropck_outlives.rs3
-rw-r--r--compiler/rustc_traits/src/normalize_projection_ty.rs26
-rw-r--r--compiler/rustc_transmute/src/layout/tree.rs3
-rw-r--r--compiler/rustc_transmute/src/lib.rs15
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/mod.rs2
-rw-r--r--compiler/rustc_ty_utils/src/abi.rs25
-rw-r--r--compiler/rustc_ty_utils/src/implied_bounds.rs4
-rw-r--r--compiler/rustc_ty_utils/src/instance.rs26
-rw-r--r--compiler/rustc_ty_utils/src/layout.rs14
-rw-r--r--compiler/rustc_ty_utils/src/needs_drop.rs58
-rw-r--r--compiler/rustc_ty_utils/src/opaque_types.rs8
-rw-r--r--compiler/rustc_ty_utils/src/ty.rs28
-rw-r--r--compiler/rustc_type_ir/src/fold.rs14
-rw-r--r--compiler/rustc_type_ir/src/lib.rs48
-rw-r--r--compiler/rustc_type_ir/src/sty.rs107
-rw-r--r--compiler/rustc_type_ir/src/visit.rs5
-rw-r--r--compiler/stable_mir/Cargo.toml8
-rw-r--r--compiler/stable_mir/README.md (renamed from compiler/rustc_smir/README.md)0
-rw-r--r--compiler/stable_mir/rust-toolchain.toml (renamed from compiler/rustc_smir/rust-toolchain.toml)0
-rw-r--r--compiler/stable_mir/src/fold.rs245
-rw-r--r--compiler/stable_mir/src/lib.rs (renamed from compiler/rustc_smir/src/stable_mir/mod.rs)127
-rw-r--r--compiler/stable_mir/src/mir.rs (renamed from compiler/rustc_smir/src/stable_mir/mir.rs)0
-rw-r--r--compiler/stable_mir/src/mir/body.rs (renamed from compiler/rustc_smir/src/stable_mir/mir/body.rs)79
-rw-r--r--compiler/stable_mir/src/ty.rs567
-rw-r--r--compiler/stable_mir/src/visitor.rs203
692 files changed, 23786 insertions, 19091 deletions
diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml
index 41003ad83..dcb165f9f 100644
--- a/compiler/rustc/Cargo.toml
+++ b/compiler/rustc/Cargo.toml
@@ -13,6 +13,7 @@ rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
# Make sure rustc_smir ends up in the sysroot, because this
# crate is intended to be used by stable MIR consumers, which are not in-tree
rustc_smir = { path = "../rustc_smir" }
+stable_mir = { path = "../stable_mir" }
[dependencies.jemalloc-sys]
version = "0.5.0"
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs
index a8a1a9057..0706dc18f 100644
--- a/compiler/rustc_abi/src/layout.rs
+++ b/compiler/rustc_abi/src/layout.rs
@@ -157,8 +157,10 @@ pub trait LayoutCalculator {
// for non-ZST uninhabited data (mostly partial initialization).
let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
- let is_zst = fields.iter().all(|f| f.0.is_zst());
- uninhabited && is_zst
+ // We cannot ignore alignment; that might lead us to entirely discard a variant and
+ // produce an enum that is less aligned than it should be!
+ let is_1zst = fields.iter().all(|f| f.0.is_1zst());
+ uninhabited && is_1zst
};
let (present_first, present_second) = {
let mut present_variants = variants
@@ -357,10 +359,8 @@ pub trait LayoutCalculator {
// It'll fit, but we need to make some adjustments.
match layout.fields {
FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for (j, offset) in offsets.iter_enumerated_mut() {
- if !variants[i][j].0.is_zst() {
- *offset += this_offset;
- }
+ for offset in offsets.iter_mut() {
+ *offset += this_offset;
}
}
_ => {
@@ -504,7 +504,7 @@ pub trait LayoutCalculator {
// to make room for a larger discriminant.
for field_idx in st.fields.index_by_increasing_offset() {
let field = &field_layouts[FieldIdx::from_usize(field_idx)];
- if !field.0.is_zst() || field.align().abi.bytes() != 1 {
+ if !field.0.is_1zst() {
start_align = start_align.min(field.align().abi);
break;
}
@@ -603,12 +603,15 @@ pub trait LayoutCalculator {
abi = Abi::Scalar(tag);
} else {
// Try to use a ScalarPair for all tagged enums.
+ // That's possible only if we can find a common primitive type for all variants.
let mut common_prim = None;
let mut common_prim_initialized_in_all_variants = true;
for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
panic!();
};
+ // We skip *all* ZST here and later check if we are good in terms of alignment.
+ // This lets us handle some cases involving aligned ZST.
let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
let (field, offset) = match (fields.next(), fields.next()) {
(None, None) => {
@@ -954,9 +957,6 @@ fn univariant(
};
(
- // Place ZSTs first to avoid "interesting offsets", especially with only one
- // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
- !f.0.is_zst(),
// Then place largest alignments first.
cmp::Reverse(alignment_group_key(f)),
// Then prioritize niche placement within alignment group according to
@@ -1073,9 +1073,10 @@ fn univariant(
let size = min_size.align_to(align.abi);
let mut layout_of_single_non_zst_field = None;
let mut abi = Abi::Aggregate { sized };
- // Unpack newtype ABIs and find scalar pairs.
+ // Try to make this a Scalar/ScalarPair.
if sized && size.bytes() > 0 {
- // All other fields must be ZSTs.
+ // We skip *all* ZST here and later check if we are good in terms of alignment.
+ // This lets us handle some cases involving aligned ZST.
let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
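
Aside: the `univariant` hunk above drops the "place ZSTs first" component of the field-ordering key and keeps `cmp::Reverse(alignment_group_key(f))`, so fields are ordered by descending alignment. A minimal standalone sketch of that ordering idea, with a made-up `Field` type (not rustc code):

    use std::cmp::Reverse;

    struct Field { name: &'static str, align: u64 }

    fn main() {
        let mut fields = vec![
            Field { name: "a", align: 1 },
            Field { name: "b", align: 8 },
            Field { name: "c", align: 4 },
        ];
        // `Reverse` flips the ordering, so an ascending sort by key puts the
        // largest alignment first, mirroring `cmp::Reverse(alignment_group_key(f))`.
        fields.sort_by_key(|f| Reverse(f.align));
        let names: Vec<_> = fields.iter().map(|f| f.name).collect();
        assert_eq!(names, ["b", "c", "a"]);
    }
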
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 12dd1542d..b30ff058a 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1,5 +1,5 @@
#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
-#![cfg_attr(all(not(bootstrap), feature = "nightly"), allow(internal_features))]
+#![cfg_attr(feature = "nightly", allow(internal_features))]
use std::fmt;
#[cfg(feature = "nightly")]
@@ -1300,12 +1300,18 @@ impl Abi {
matches!(*self, Abi::Uninhabited)
}
- /// Returns `true` is this is a scalar type
+ /// Returns `true` if this is a scalar type
#[inline]
pub fn is_scalar(&self) -> bool {
matches!(*self, Abi::Scalar(_))
}
+ /// Returns `true` if this is a bool
+ #[inline]
+ pub fn is_bool(&self) -> bool {
+ matches!(*self, Abi::Scalar(s) if s.is_bool())
+ }
+
/// Returns the fixed alignment of this ABI, if any is mandated.
pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
Some(match *self {
@@ -1348,6 +1354,23 @@ impl Abi {
Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
}
}
+
+ pub fn eq_up_to_validity(&self, other: &Self) -> bool {
+ match (self, other) {
+ // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
+ // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
+ (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+ (
+ Abi::Vector { element: element_l, count: count_l },
+ Abi::Vector { element: element_r, count: count_r },
+ ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
+ (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+ l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
+ }
+ // Everything else must be strictly identical.
+ _ => self == other,
+ }
+ }
}
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
@@ -1660,15 +1683,25 @@ pub struct PointeeInfo {
impl LayoutS {
/// Returns `true` if the layout corresponds to an unsized type.
+ #[inline]
pub fn is_unsized(&self) -> bool {
self.abi.is_unsized()
}
+ #[inline]
pub fn is_sized(&self) -> bool {
self.abi.is_sized()
}
+ /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
+ pub fn is_1zst(&self) -> bool {
+ self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
+ }
+
/// Returns `true` if the type is a ZST and not unsized.
+ ///
+ /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
+ /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
pub fn is_zst(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
@@ -1676,6 +1709,22 @@ impl LayoutS {
Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
}
}
+
+ /// Checks if these two `Layout` are equal enough to be considered "the same for all function
+ /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
+ /// `Layout`; the `PassMode` need to be compared as well.
+ pub fn eq_abi(&self, other: &Self) -> bool {
+ // The one thing that we are not capturing here is that for unsized types, the metadata must
+ // also have the same ABI, and moreover that the same metadata leads to the same size. The
+ // 2nd point is quite hard to check though.
+ self.size == other.size
+ && self.is_sized() == other.is_sized()
+ && self.abi.eq_up_to_validity(&other.abi)
+ && self.abi.is_bool() == other.abi.is_bool()
+ && self.align.abi == other.align.abi
+ && self.max_repr_align == other.max_repr_align
+ && self.unadjusted_abi_align == other.unadjusted_abi_align
+ }
}
#[derive(Copy, Clone, Debug)]
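
Aside: the new `LayoutS::is_1zst` helper above distinguishes "size 0 and alignment 1" from a plain ZST, which may still carry an alignment requirement. A standalone sketch of that distinction using `std::mem` on ordinary types; the free-function names below are made up for illustration:

    use std::mem::{align_of, size_of};

    fn is_zst<T>() -> bool {
        size_of::<T>() == 0
    }

    fn is_1zst<T>() -> bool {
        size_of::<T>() == 0 && align_of::<T>() == 1
    }

    fn main() {
        // `()` has size 0 and alignment 1: both a ZST and a 1-ZST.
        assert!(is_zst::<()>() && is_1zst::<()>());
        // `[u64; 0]` has size 0 but alignment 8: a ZST, yet not a 1-ZST,
        // so it can still constrain the alignment of an enclosing type.
        assert!(is_zst::<[u64; 0]>() && !is_1zst::<[u64; 0]>());
    }
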
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index e45b7c154..23fdd272f 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -24,7 +24,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#![allow(clippy::mut_from_ref)] // Arena allocators are one of the places where this pattern is fine.
use smallvec::SmallVec;
@@ -37,9 +37,10 @@ use std::ptr::{self, NonNull};
use std::slice;
use std::{cmp, intrinsics};
+/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
-fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+fn outline<F: FnOnce() -> R, R>(f: F) -> R {
f()
}
@@ -600,7 +601,7 @@ impl DroplessArena {
unsafe { self.write_from_iter(iter, len, mem) }
}
(_, _) => {
- cold_path(move || -> &mut [T] {
+ outline(move || -> &mut [T] {
let mut vec: SmallVec<[_; 8]> = iter.collect();
if vec.is_empty() {
return &mut [];
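
Aside: the arena hunk renames `cold_path` to `outline`, an `#[inline(never)] #[cold]` wrapper that keeps a rarely-taken slow path from being inlined into the hot caller. A small self-contained sketch of the same pattern; `push_fast_path` is a hypothetical example, not rustc code:

    /// Calls the passed closure while ensuring it won't be inlined into the caller.
    #[inline(never)]
    #[cold]
    fn outline<F: FnOnce() -> R, R>(f: F) -> R {
        f()
    }

    fn push_fast_path(buf: &mut Vec<u8>, byte: u8) {
        if buf.len() == buf.capacity() {
            // Rare case: keep the reallocation logic out of the hot caller.
            outline(|| buf.reserve(32));
        }
        buf.push(byte);
    }

    fn main() {
        let mut buf = Vec::new();
        for b in 0u8..=255 {
            push_fast_path(&mut buf, b);
        }
        assert_eq!(buf.len(), 256);
    }
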
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 58725a08c..e8cbd7b69 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -33,7 +33,7 @@ use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::source_map::{respan, Spanned};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
use std::fmt;
use std::mem;
use thin_vec::{thin_vec, ThinVec};
@@ -1426,7 +1426,7 @@ pub enum ExprKind {
/// of `if` / `while` expressions. (e.g., `if let 0 = x { .. }`).
///
/// `Span` represents the whole `let pat = expr` statement.
- Let(P<Pat>, P<Expr>, Span),
+ Let(P<Pat>, P<Expr>, Span, Option<ErrorGuaranteed>),
/// An `if` block, with an optional `else` block.
///
/// `if expr { block } else { expr }`
@@ -2092,6 +2092,10 @@ pub enum TyKind {
Never,
/// A tuple (`(A, B, C, D,...)`).
Tup(ThinVec<P<Ty>>),
+ /// An anonymous struct type i.e. `struct { foo: Type }`
+ AnonStruct(ThinVec<FieldDef>),
+ /// An anonymous union type i.e. `union { bar: Type }`
+ AnonUnion(ThinVec<FieldDef>),
/// A path (`module::module::...::Type`), optionally
/// "qualified", e.g., `<Vec<T> as SomeTrait>::SomeType`.
///
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 19a2b3017..db008ea13 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -99,6 +99,22 @@ impl Attribute {
}
}
+ pub fn path_matches(&self, name: &[Symbol]) -> bool {
+ match &self.kind {
+ AttrKind::Normal(normal) => {
+ normal.item.path.segments.len() == name.len()
+ && normal
+ .item
+ .path
+ .segments
+ .iter()
+ .zip(name)
+ .all(|(s, n)| s.args.is_none() && s.ident.name == *n)
+ }
+ AttrKind::DocComment(..) => false,
+ }
+ }
+
pub fn is_word(&self) -> bool {
if let AttrKind::Normal(normal) = &self.kind {
matches!(normal.item.args, AttrArgs::Empty)
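
Aside: the new `Attribute::path_matches` above compares an attribute path segment-by-segment against an expected list of symbols (and rejects segments carrying generic arguments). A simplified analogue over plain string slices, covering only the name comparison; the types stand in for rustc's `PathSegment`/`Symbol`:

    fn path_matches(segments: &[&str], name: &[&str]) -> bool {
        segments.len() == name.len()
            && segments.iter().zip(name).all(|(s, n)| s == n)
    }

    fn main() {
        assert!(path_matches(&["rustc", "on_unimplemented"], &["rustc", "on_unimplemented"]));
        assert!(!path_matches(&["rustc", "on_unimplemented"], &["rustc"]));
    }
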
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
index 48e9b180b..ba2887146 100644
--- a/compiler/rustc_ast/src/mut_visit.rs
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -510,6 +510,9 @@ pub fn noop_visit_ty<T: MutVisitor>(ty: &mut P<Ty>, vis: &mut T) {
visit_vec(bounds, |bound| vis.visit_param_bound(bound));
}
TyKind::MacCall(mac) => vis.visit_mac_call(mac),
+ TyKind::AnonStruct(fields) | TyKind::AnonUnion(fields) => {
+ fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
+ }
}
vis.visit_span(span);
visit_lazy_tts(tokens, vis);
@@ -1363,7 +1366,7 @@ pub fn noop_visit_expr<T: MutVisitor>(
vis.visit_ty(ty);
}
ExprKind::AddrOf(_, _, ohs) => vis.visit_expr(ohs),
- ExprKind::Let(pat, scrutinee, _) => {
+ ExprKind::Let(pat, scrutinee, _, _) => {
vis.visit_pat(pat);
vis.visit_expr(scrutinee);
}
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
index f4ad0efa4..300b1486f 100644
--- a/compiler/rustc_ast/src/token.rs
+++ b/compiler/rustc_ast/src/token.rs
@@ -486,6 +486,8 @@ impl Token {
Lt | BinOp(Shl) | // associated path
ModSep => true, // global path
Interpolated(ref nt) => matches!(**nt, NtTy(..) | NtPath(..)),
+ // For anonymous structs or unions, which only appear in specific positions
+ // (type of struct fields or union fields), we don't consider them as regular types
_ => false,
}
}
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
index e9591c7c8..1e18b1232 100644
--- a/compiler/rustc_ast/src/tokenstream.rs
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -213,14 +213,10 @@ impl AttrTokenStream {
.into_iter()
}
AttrTokenTree::Attributes(data) => {
- let mut outer_attrs = Vec::new();
- let mut inner_attrs = Vec::new();
- for attr in &data.attrs {
- match attr.style {
- crate::AttrStyle::Outer => outer_attrs.push(attr),
- crate::AttrStyle::Inner => inner_attrs.push(attr),
- }
- }
+ let idx = data
+ .attrs
+ .partition_point(|attr| matches!(attr.style, crate::AttrStyle::Outer));
+ let (outer_attrs, inner_attrs) = data.attrs.split_at(idx);
let mut target_tokens: Vec<_> = data
.tokens
@@ -265,10 +261,10 @@ impl AttrTokenStream {
"Failed to find trailing delimited group in: {target_tokens:?}"
);
}
- let mut flat: SmallVec<[_; 1]> = SmallVec::new();
+ let mut flat: SmallVec<[_; 1]> =
+ SmallVec::with_capacity(target_tokens.len() + outer_attrs.len());
for attr in outer_attrs {
- // FIXME: Make this more efficient
- flat.extend(attr.tokens().0.clone().iter().cloned());
+ flat.extend(attr.tokens().0.iter().cloned());
}
flat.extend(target_tokens);
flat.into_iter()
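
Aside: the tokenstream hunk replaces a manual partitioning loop with `partition_point` + `split_at`, relying on outer attributes being stored before inner ones. A small sketch of that slice idiom; the `Style` enum is a stand-in for rustc's `AttrStyle`:

    #[derive(Debug, PartialEq)]
    enum Style { Outer, Inner }

    fn main() {
        let attrs = vec![Style::Outer, Style::Outer, Style::Inner];
        // `partition_point` assumes the predicate holds for a prefix and fails
        // for the rest, which is the case when outer attrs are stored first.
        let idx = attrs.partition_point(|a| matches!(a, Style::Outer));
        let (outer, inner) = attrs.split_at(idx);
        assert_eq!(outer.len(), 2);
        assert_eq!(inner, &[Style::Inner]);
    }
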
diff --git a/compiler/rustc_ast/src/util/classify.rs b/compiler/rustc_ast/src/util/classify.rs
index 607b77705..f9f1c0cf9 100644
--- a/compiler/rustc_ast/src/util/classify.rs
+++ b/compiler/rustc_ast/src/util/classify.rs
@@ -36,7 +36,7 @@ pub fn expr_trailing_brace(mut expr: &ast::Expr) -> Option<&ast::Expr> {
| AssignOp(_, _, e)
| Binary(_, _, e)
| Break(_, Some(e))
- | Let(_, e, _)
+ | Let(_, e, _, _)
| Range(_, Some(e), _)
| Ret(Some(e))
| Unary(_, e)
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
index 6d474de2d..e66c4a9ee 100644
--- a/compiler/rustc_ast/src/visit.rs
+++ b/compiler/rustc_ast/src/visit.rs
@@ -438,6 +438,9 @@ pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
TyKind::MacCall(mac) => visitor.visit_mac_call(mac),
TyKind::Never | TyKind::CVarArgs => {}
+ TyKind::AnonStruct(ref fields, ..) | TyKind::AnonUnion(ref fields, ..) => {
+ walk_list!(visitor, visit_field_def, fields)
+ }
}
}
@@ -824,7 +827,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
visitor.visit_expr(subexpression);
visitor.visit_ty(typ)
}
- ExprKind::Let(pat, expr, _) => {
+ ExprKind::Let(pat, expr, _, _) => {
visitor.visit_pat(pat);
visitor.visit_expr(expr);
}
diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl
index f63a9bfcd..8115c4b55 100644
--- a/compiler/rustc_ast_lowering/messages.ftl
+++ b/compiler/rustc_ast_lowering/messages.ftl
@@ -29,10 +29,6 @@ ast_lowering_bad_return_type_notation_inputs =
argument types not allowed with return type notation
.suggestion = remove the input types
-ast_lowering_bad_return_type_notation_needs_dots =
- return type notation arguments must be elided with `..`
- .suggestion = add `..`
-
ast_lowering_bad_return_type_notation_output =
return type not allowed with return type notation
.suggestion = remove the return type
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
index 7408b4fb0..57c54f854 100644
--- a/compiler/rustc_ast_lowering/src/expr.rs
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -152,13 +152,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
let ohs = self.lower_expr(ohs);
hir::ExprKind::AddrOf(*k, *m, ohs)
}
- ExprKind::Let(pat, scrutinee, span) => {
+ ExprKind::Let(pat, scrutinee, span, is_recovered) => {
hir::ExprKind::Let(self.arena.alloc(hir::Let {
hir_id: self.next_id(),
span: self.lower_span(*span),
pat: self.lower_pat(pat),
ty: None,
init: self.lower_expr(scrutinee),
+ is_recovered: *is_recovered,
}))
}
ExprKind::If(cond, then, else_opt) => {
@@ -558,13 +559,14 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> {
let pat = self.lower_pat(&arm.pat);
let guard = arm.guard.as_ref().map(|cond| {
- if let ExprKind::Let(pat, scrutinee, span) = &cond.kind {
+ if let ExprKind::Let(pat, scrutinee, span, is_recovered) = &cond.kind {
hir::Guard::IfLet(self.arena.alloc(hir::Let {
hir_id: self.next_id(),
span: self.lower_span(*span),
pat: self.lower_pat(pat),
ty: None,
init: self.lower_expr(scrutinee),
+ is_recovered: *is_recovered,
}))
} else {
hir::Guard::If(self.lower_expr(cond))
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
index ce847906f..eff362f3f 100644
--- a/compiler/rustc_ast_lowering/src/index.rs
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -2,19 +2,17 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
-use rustc_hir::definitions;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::*;
use rustc_index::{Idx, IndexVec};
use rustc_middle::span_bug;
-use rustc_session::Session;
-use rustc_span::source_map::SourceMap;
+use rustc_middle::ty::TyCtxt;
use rustc_span::{Span, DUMMY_SP};
/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
pub(super) struct NodeCollector<'a, 'hir> {
- /// Source map
- source_map: &'a SourceMap,
+ tcx: TyCtxt<'hir>,
+
bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>,
/// Outputs
@@ -25,14 +23,11 @@ pub(super) struct NodeCollector<'a, 'hir> {
parent_node: hir::ItemLocalId,
owner: OwnerId,
-
- definitions: &'a definitions::Definitions,
}
-#[instrument(level = "debug", skip(sess, definitions, bodies))]
+#[instrument(level = "debug", skip(tcx, bodies))]
pub(super) fn index_hir<'hir>(
- sess: &Session,
- definitions: &definitions::Definitions,
+ tcx: TyCtxt<'hir>,
item: hir::OwnerNode<'hir>,
bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) {
@@ -42,8 +37,7 @@ pub(super) fn index_hir<'hir>(
// used.
nodes.push(Some(ParentedNode { parent: ItemLocalId::INVALID, node: item.into() }));
let mut collector = NodeCollector {
- source_map: sess.source_map(),
- definitions,
+ tcx,
owner: item.def_id(),
parent_node: ItemLocalId::new(0),
nodes,
@@ -79,11 +73,17 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> {
span,
"inconsistent HirId at `{:?}` for `{:?}`: \
current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
- self.source_map.span_to_diagnostic_string(span),
+ self.tcx.sess.source_map().span_to_diagnostic_string(span),
node,
- self.definitions.def_path(self.owner.def_id).to_string_no_crate_verbose(),
+ self.tcx
+ .definitions_untracked()
+ .def_path(self.owner.def_id)
+ .to_string_no_crate_verbose(),
self.owner,
- self.definitions.def_path(hir_id.owner.def_id).to_string_no_crate_verbose(),
+ self.tcx
+ .definitions_untracked()
+ .def_path(hir_id.owner.def_id)
+ .to_string_no_crate_verbose(),
hir_id.owner,
)
}
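A minimal sketch (hypothetical names, not the real interfaces) of the refactor in this file: instead of carrying `source_map` and `definitions` as separate borrowed fields, the collector keeps one context handle and derives both from it when a diagnostic is built.

    // Stand-ins for the session/definitions state reachable through `TyCtxt`.
    struct Ctx {
        source_map: String,
        definitions: Vec<String>,
    }

    struct Collector<'a> {
        cx: &'a Ctx,
    }

    impl<'a> Collector<'a> {
        fn diag(&self, idx: usize) -> String {
            // Both pieces of state come from the single handle, mirroring
            // `self.tcx.sess.source_map()` and `self.tcx.definitions_untracked()`.
            format!("{} in {}", self.cx.definitions[idx], self.cx.source_map)
        }
    }

    fn main() {
        let cx = Ctx { source_map: "lib.rs".into(), definitions: vec!["crate::foo".into()] };
        let collector = Collector { cx: &cx };
        println!("{}", collector.diag(0));
    }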
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
index a59c83de0..edc1e2f0b 100644
--- a/compiler/rustc_ast_lowering/src/item.rs
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -1308,7 +1308,7 @@ impl<'hir> LoweringContext<'_, 'hir> {
fn lower_asyncness(&mut self, a: Async) -> hir::IsAsync {
match a {
- Async::Yes { .. } => hir::IsAsync::Async,
+ Async::Yes { span, .. } => hir::IsAsync::Async(span),
Async::No => hir::IsAsync::NotAsync,
}
}
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
index 4a47de128..85ab5e722 100644
--- a/compiler/rustc_ast_lowering/src/lib.rs
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -153,6 +153,7 @@ trait ResolverAstLoweringExt {
fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
+ fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId);
fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind;
}
@@ -213,6 +214,11 @@ impl ResolverAstLoweringExt for ResolverAstLowering {
self.extra_lifetime_params_map.remove(&id).unwrap_or_default()
}
+ fn remap_extra_lifetime_params(&mut self, from: NodeId, to: NodeId) {
+ let lifetimes = self.extra_lifetime_params_map.remove(&from).unwrap_or_default();
+ self.extra_lifetime_params_map.insert(to, lifetimes);
+ }
+
fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind {
self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang)
}
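A self-contained sketch (toy map and ids) of what the new `remap_extra_lifetime_params` helper does: whatever list was recorded under one node id is moved so later lookups find it under another.

    use std::collections::HashMap;

    fn remap(map: &mut HashMap<u32, Vec<&'static str>>, from: u32, to: u32) {
        // Mirrors the helper: take the entry for `from` (or an empty list)
        // and re-insert it under `to`.
        let lifetimes = map.remove(&from).unwrap_or_default();
        map.insert(to, lifetimes);
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert(1, vec!["'a", "'b"]);
        remap(&mut map, 1, 7);
        assert_eq!(map.get(&7).map(Vec::len), Some(2));
        assert!(map.get(&1).is_none());
    }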
@@ -236,7 +242,7 @@ enum ImplTraitContext {
ReturnPositionOpaqueTy {
/// Origin: Either OpaqueTyOrigin::FnReturn or OpaqueTyOrigin::AsyncFn,
origin: hir::OpaqueTyOrigin,
- in_trait: bool,
+ fn_kind: FnDeclKind,
},
/// Impl trait in type aliases.
TypeAliasesOpaqueTy { in_assoc_ty: bool },
@@ -312,7 +318,7 @@ impl std::fmt::Display for ImplTraitPosition {
}
}
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum FnDeclKind {
Fn,
Inherent,
@@ -665,8 +671,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
} else {
(None, None)
};
- let (nodes, parenting) =
- index::index_hir(self.tcx.sess, &*self.tcx.definitions_untracked(), node, &bodies);
+ let (nodes, parenting) = index::index_hir(self.tcx, node, &bodies);
let nodes = hir::OwnerNodes { opt_hash_including_bodies, nodes, bodies };
let attrs = hir::AttributeMap { map: attrs, opt_hash: attrs_hash };
@@ -765,7 +770,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
/// Intercept all spans entering HIR.
/// Mark a span as relative to the current owning item.
fn lower_span(&self, span: Span) -> Span {
- if self.tcx.sess.opts.incremental_relative_spans() {
+ if self.tcx.sess.opts.incremental.is_some() {
span.with_parent(Some(self.current_hir_id_owner.def_id))
} else {
// Do not make spans relative when not using incremental compilation.
@@ -1089,6 +1094,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
// constructing the HIR for `impl bounds...` and then lowering that.
let impl_trait_node_id = self.next_node_id();
+ // Shift `impl Trait` lifetime captures from the associated type bound's
+ // node id to the opaque node id, so that the opaque can actually use
+ // these lifetime bounds.
+ self.resolver
+ .remap_extra_lifetime_params(constraint.id, impl_trait_node_id);
self.with_dyn_type_scope(false, |this| {
let node_id = this.next_node_id();
@@ -1293,6 +1303,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
TyKind::Err => {
hir::TyKind::Err(self.tcx.sess.delay_span_bug(t.span, "TyKind::Err lowered"))
}
+ // FIXME(unnamed_fields): IMPLEMENTATION IN PROGRESS
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
+ TyKind::AnonStruct(ref _fields) => hir::TyKind::Err(
+ self.tcx.sess.span_err(t.span, "anonymous structs are unimplemented"),
+ ),
+ // FIXME(unnamed_fields): IMPLEMENTATION IN PROGRESS
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
+ TyKind::AnonUnion(ref _fields) => hir::TyKind::Err(
+ self.tcx.sess.span_err(t.span, "anonymous unions are unimplemented"),
+ ),
TyKind::Slice(ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
TyKind::Ptr(mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
TyKind::Ref(region, mt) => {
@@ -1389,13 +1411,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
TyKind::ImplTrait(def_node_id, bounds) => {
let span = t.span;
match itctx {
- ImplTraitContext::ReturnPositionOpaqueTy { origin, in_trait } => self
+ ImplTraitContext::ReturnPositionOpaqueTy { origin, fn_kind } => self
.lower_opaque_impl_trait(
span,
*origin,
*def_node_id,
bounds,
- *in_trait,
+ Some(*fn_kind),
itctx,
),
&ImplTraitContext::TypeAliasesOpaqueTy { in_assoc_ty } => self
@@ -1404,17 +1426,11 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
hir::OpaqueTyOrigin::TyAlias { in_assoc_ty },
*def_node_id,
bounds,
- false,
+ None,
itctx,
),
ImplTraitContext::Universal => {
let span = t.span;
- self.create_def(
- self.current_hir_id_owner.def_id,
- *def_node_id,
- DefPathData::ImplTrait,
- span,
- );
// HACK: pprust breaks strings with newlines when the type
// gets too long. We don't want these to show up in compiler
@@ -1425,6 +1441,12 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
span,
);
+ self.create_def(
+ self.current_hir_id_owner.def_id,
+ *def_node_id,
+ DefPathData::TypeNs(ident.name),
+ span,
+ );
let (param, bounds, path) = self.lower_universal_param_and_bounds(
*def_node_id,
span,
@@ -1511,7 +1533,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
origin: hir::OpaqueTyOrigin,
opaque_ty_node_id: NodeId,
bounds: &GenericBounds,
- in_trait: bool,
+ fn_kind: Option<FnDeclKind>,
itctx: &ImplTraitContext,
) -> hir::TyKind<'hir> {
// Make sure we know that some funky desugaring has been going on here.
@@ -1528,10 +1550,22 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
Vec::new()
}
hir::OpaqueTyOrigin::FnReturn(..) => {
- // in fn return position, like the `fn test<'a>() -> impl Debug + 'a`
- // example, we only need to duplicate lifetimes that appear in the
- // bounds, since those are the only ones that are captured by the opaque.
- lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+ if let FnDeclKind::Impl | FnDeclKind::Trait =
+ fn_kind.expect("expected RPITs to be lowered with a FnKind")
+ {
+                        // return-position impl trait in trait is specified to capture all
+ // in-scope lifetimes, which we collect for all opaques during resolution.
+ self.resolver
+ .take_extra_lifetime_params(opaque_ty_node_id)
+ .into_iter()
+ .map(|(ident, id, _)| Lifetime { id, ident })
+ .collect()
+ } else {
+ // in fn return position, like the `fn test<'a>() -> impl Debug + 'a`
+ // example, we only need to duplicate lifetimes that appear in the
+ // bounds, since those are the only ones that are captured by the opaque.
+ lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+ }
}
hir::OpaqueTyOrigin::AsyncFn(..) => {
unreachable!("should be using `lower_async_fn_ret_ty`")
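For context, a hedged illustration of the two capture rules this branch distinguishes; the trait and impl cases assume a toolchain where return-position `impl Trait` in traits is usable.

    use std::fmt::Debug;

    // Free fn: only lifetimes named in the bounds (`'a` here) are captured,
    // hence the `lifetimes_in_bounds` path.
    fn free<'a>(x: &'a u32) -> impl Debug + 'a {
        x
    }

    // Trait/impl case: the opaque captures all in-scope lifetimes (no explicit
    // `+ '_` needed), which is why the hunk collects the extra lifetime params
    // recorded during resolution instead of scanning the bounds.
    trait Get {
        fn get(&self) -> impl Debug;
    }

    impl Get for u32 {
        fn get(&self) -> impl Debug {
            self
        }
    }

    fn main() {
        println!("{:?} {:?}", free(&1), 2u32.get());
    }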
@@ -1542,7 +1576,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
self.lower_opaque_inner(
opaque_ty_node_id,
origin,
- in_trait,
+ matches!(fn_kind, Some(FnDeclKind::Trait)),
captured_lifetimes_to_duplicate,
span,
opaque_ty_span,
@@ -1630,7 +1664,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
lifetime.ident,
));
- // Now make an arg that we can use for the substs of the opaque tykind.
+ // Now make an arg that we can use for the generic params of the opaque tykind.
let id = self.next_node_id();
let lifetime_arg = self.new_named_lifetime_with_res(id, lifetime.ident, res);
let duplicated_lifetime_def_id = self.local_def_id(duplicated_lifetime_node_id);
@@ -1790,12 +1824,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
}
let fn_def_id = self.local_def_id(fn_node_id);
- self.lower_async_fn_ret_ty(
- &decl.output,
- fn_def_id,
- ret_id,
- matches!(kind, FnDeclKind::Trait),
- )
+ self.lower_async_fn_ret_ty(&decl.output, fn_def_id, ret_id, kind)
} else {
match &decl.output {
FnRetTy::Ty(ty) => {
@@ -1803,7 +1832,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let fn_def_id = self.local_def_id(fn_node_id);
ImplTraitContext::ReturnPositionOpaqueTy {
origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
- in_trait: matches!(kind, FnDeclKind::Trait),
+ fn_kind: kind,
}
} else {
let position = match kind {
@@ -1871,7 +1900,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
output: &FnRetTy,
fn_def_id: LocalDefId,
opaque_ty_node_id: NodeId,
- in_trait: bool,
+ fn_kind: FnDeclKind,
) -> hir::FnRetTy<'hir> {
let span = self.lower_span(output.span());
let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
@@ -1886,7 +1915,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let opaque_ty_ref = self.lower_opaque_inner(
opaque_ty_node_id,
hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
- in_trait,
+ matches!(fn_kind, FnDeclKind::Trait),
captured_lifetimes,
span,
opaque_ty_span,
@@ -1894,7 +1923,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
let future_bound = this.lower_async_fn_output_type_to_future_bound(
output,
span,
- if in_trait && !this.tcx.features().return_position_impl_trait_in_trait {
+ if let FnDeclKind::Trait = fn_kind
+ && !this.tcx.features().return_position_impl_trait_in_trait
+ {
ImplTraitContext::FeatureGated(
ImplTraitPosition::TraitReturn,
sym::return_position_impl_trait_in_trait,
@@ -1902,7 +1933,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
} else {
ImplTraitContext::ReturnPositionOpaqueTy {
origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
- in_trait,
+ fn_kind,
}
},
);
diff --git a/compiler/rustc_ast_passes/messages.ftl b/compiler/rustc_ast_passes/messages.ftl
index f323bb4c2..43020a930 100644
--- a/compiler/rustc_ast_passes/messages.ftl
+++ b/compiler/rustc_ast_passes/messages.ftl
@@ -1,3 +1,7 @@
+ast_passes_anon_struct_or_union_not_allowed =
+ anonymous {$struct_or_union}s are not allowed outside of unnamed struct or union fields
+ .label = anonymous {$struct_or_union} declared here
+
ast_passes_assoc_const_without_body =
associated constant in `impl` without body
.suggestion = provide a definition for the constant
@@ -113,16 +117,6 @@ ast_passes_forbidden_default =
`default` is only allowed on items in trait impls
.label = `default` because of this
-ast_passes_forbidden_let =
- `let` expressions are not supported here
- .note = only supported directly in conditions of `if` and `while` expressions
- .not_supported_or = `||` operators are not supported in let chain expressions
- .not_supported_parentheses = `let`s wrapped in parentheses are not supported in a context with let chains
-
-ast_passes_forbidden_let_stable =
- expected expression, found statement (`let`)
- .note = variable declaration using `let` is a statement
-
ast_passes_forbidden_lifetime_bound =
lifetime bounds cannot be used in this context
@@ -162,6 +156,14 @@ ast_passes_inherent_cannot_be = inherent impls cannot be {$annotation}
ast_passes_invalid_label =
invalid label name `{$name}`
+ast_passes_invalid_unnamed_field =
+ unnamed fields are not allowed outside of structs or unions
+ .label = unnamed field declared here
+
+ast_passes_invalid_unnamed_field_ty =
+ unnamed fields can only have struct or union types
+ .label = not a struct or union
+
ast_passes_item_underscore = `{$kind}` items in this context need a name
.label = `_` is not a valid name for this `{$kind}` item
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
index bd3e676da..7bc685a54 100644
--- a/compiler/rustc_ast_passes/src/ast_validation.rs
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -14,14 +14,12 @@ use rustc_ast::{walk_list, StaticItem};
use rustc_ast_pretty::pprust::{self, State};
use rustc_data_structures::fx::FxIndexMap;
use rustc_feature::Features;
-use rustc_macros::Subdiagnostic;
use rustc_parse::validate_attr;
use rustc_session::lint::builtin::{
DEPRECATED_WHERE_CLAUSE_LOCATION, MISSING_ABI, PATTERNS_IN_FNS_WITHOUT_BODY,
};
use rustc_session::lint::{BuiltinLintDiagnostics, LintBuffer};
use rustc_session::Session;
-use rustc_span::source_map::Spanned;
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::Span;
use rustc_target::spec::abi;
@@ -69,9 +67,6 @@ struct AstValidator<'a> {
/// or `Foo::Bar<impl Trait>`
is_impl_trait_banned: bool,
- /// See [ForbiddenLetReason]
- forbidden_let_reason: Option<ForbiddenLetReason>,
-
lint_buffer: &'a mut LintBuffer,
}
@@ -118,26 +113,6 @@ impl<'a> AstValidator<'a> {
self.with_tilde_const(Some(ctx), f)
}
- fn with_let_management(
- &mut self,
- forbidden_let_reason: Option<ForbiddenLetReason>,
- f: impl FnOnce(&mut Self, Option<ForbiddenLetReason>),
- ) {
- let old = mem::replace(&mut self.forbidden_let_reason, forbidden_let_reason);
- f(self, old);
- self.forbidden_let_reason = old;
- }
-
- /// Emits an error banning the `let` expression provided in the given location.
- fn ban_let_expr(&self, expr: &'a Expr, forbidden_let_reason: ForbiddenLetReason) {
- let sess = &self.session;
- if sess.opts.unstable_features.is_nightly_build() {
- sess.emit_err(errors::ForbiddenLet { span: expr.span, reason: forbidden_let_reason });
- } else {
- sess.emit_err(errors::ForbiddenLetStable { span: expr.span });
- }
- }
-
fn check_type_alias_where_clause_location(
&mut self,
ty_alias: &TyAlias,
@@ -223,10 +198,27 @@ impl<'a> AstValidator<'a> {
}
}
}
+ TyKind::AnonStruct(ref fields, ..) | TyKind::AnonUnion(ref fields, ..) => {
+ walk_list!(self, visit_field_def, fields)
+ }
_ => visit::walk_ty(self, t),
}
}
+ fn visit_struct_field_def(&mut self, field: &'a FieldDef) {
+ if let Some(ident) = field.ident &&
+ ident.name == kw::Underscore {
+ self.check_unnamed_field_ty(&field.ty, ident.span);
+ self.visit_vis(&field.vis);
+ self.visit_ident(ident);
+ self.visit_ty_common(&field.ty);
+ self.walk_ty(&field.ty);
+ walk_list!(self, visit_attribute, &field.attrs);
+ } else {
+ self.visit_field_def(field);
+ }
+ }
+
fn err_handler(&self) -> &rustc_errors::Handler {
&self.session.diagnostic()
}
@@ -264,6 +256,42 @@ impl<'a> AstValidator<'a> {
}
}
+ fn check_unnamed_field_ty(&self, ty: &Ty, span: Span) {
+ if matches!(
+ &ty.kind,
+ // We already checked for `kw::Underscore` before calling this function,
+ // so skip the check
+ TyKind::AnonStruct(..) | TyKind::AnonUnion(..)
+ // If the anonymous field contains a Path as type, we can't determine
+ // if the path is a valid struct or union, so skip the check
+ | TyKind::Path(..)
+ ) {
+ return;
+ }
+ self.err_handler().emit_err(errors::InvalidUnnamedFieldTy { span, ty_span: ty.span });
+ }
+
+ fn deny_anon_struct_or_union(&self, ty: &Ty) {
+ let struct_or_union = match &ty.kind {
+ TyKind::AnonStruct(..) => "struct",
+ TyKind::AnonUnion(..) => "union",
+ _ => return,
+ };
+ self.err_handler()
+ .emit_err(errors::AnonStructOrUnionNotAllowed { struct_or_union, span: ty.span });
+ }
+
+ fn deny_unnamed_field(&self, field: &FieldDef) {
+ if let Some(ident) = field.ident &&
+ ident.name == kw::Underscore {
+ self.err_handler()
+ .emit_err(errors::InvalidUnnamedField {
+ span: field.span,
+ ident_span: ident.span
+ });
+ }
+ }
+
fn check_trait_fn_not_const(&self, constness: Const) {
if let Const::Yes(span) = constness {
self.session.emit_err(errors::TraitFnConst { span });
@@ -726,69 +754,9 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
validate_attr::check_attr(&self.session.parse_sess, attr);
}
- fn visit_expr(&mut self, expr: &'a Expr) {
- self.with_let_management(Some(ForbiddenLetReason::GenericForbidden), |this, forbidden_let_reason| {
- match &expr.kind {
- ExprKind::Binary(Spanned { node: BinOpKind::Or, span }, lhs, rhs) => {
- let local_reason = Some(ForbiddenLetReason::NotSupportedOr(*span));
- this.with_let_management(local_reason, |this, _| this.visit_expr(lhs));
- this.with_let_management(local_reason, |this, _| this.visit_expr(rhs));
- }
- ExprKind::If(cond, then, opt_else) => {
- this.visit_block(then);
- walk_list!(this, visit_expr, opt_else);
- this.with_let_management(None, |this, _| this.visit_expr(cond));
- return;
- }
- ExprKind::Let(..) if let Some(elem) = forbidden_let_reason => {
- this.ban_let_expr(expr, elem);
- },
- ExprKind::Match(scrutinee, arms) => {
- this.visit_expr(scrutinee);
- for arm in arms {
- this.visit_expr(&arm.body);
- this.visit_pat(&arm.pat);
- walk_list!(this, visit_attribute, &arm.attrs);
- if let Some(guard) = &arm.guard {
- this.with_let_management(None, |this, _| {
- this.visit_expr(guard)
- });
- }
- }
- }
- ExprKind::Paren(local_expr) => {
- fn has_let_expr(expr: &Expr) -> bool {
- match &expr.kind {
- ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
- ExprKind::Let(..) => true,
- _ => false,
- }
- }
- let local_reason = if has_let_expr(local_expr) {
- Some(ForbiddenLetReason::NotSupportedParentheses(local_expr.span))
- }
- else {
- forbidden_let_reason
- };
- this.with_let_management(local_reason, |this, _| this.visit_expr(local_expr));
- }
- ExprKind::Binary(Spanned { node: BinOpKind::And, .. }, ..) => {
- this.with_let_management(forbidden_let_reason, |this, _| visit::walk_expr(this, expr));
- return;
- }
- ExprKind::While(cond, then, opt_label) => {
- walk_list!(this, visit_label, opt_label);
- this.visit_block(then);
- this.with_let_management(None, |this, _| this.visit_expr(cond));
- return;
- }
- _ => visit::walk_expr(this, expr),
- }
- });
- }
-
fn visit_ty(&mut self, ty: &'a Ty) {
self.visit_ty_common(ty);
+ self.deny_anon_struct_or_union(ty);
self.walk_ty(ty)
}
@@ -803,6 +771,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
}
fn visit_field_def(&mut self, field: &'a FieldDef) {
+ self.deny_unnamed_field(field);
visit::walk_field_def(self, field)
}
@@ -995,10 +964,38 @@ impl<'a> Visitor<'a> for AstValidator<'a> {
self.check_mod_file_item_asciionly(item.ident);
}
}
- ItemKind::Union(vdata, ..) => {
+ ItemKind::Struct(vdata, generics) => match vdata {
+ // Duplicating the `Visitor` logic allows catching all cases
+                // of `Anonymous(Struct, Union)` outside of a struct or union field.
+ //
+ // Inside `visit_ty` the validator catches every `Anonymous(Struct, Union)` it
+ // encounters, and only on `ItemKind::Struct` and `ItemKind::Union`
+ // it uses `visit_ty_common`, which doesn't contain that specific check.
+ VariantData::Struct(fields, ..) => {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ self.visit_generics(generics);
+ walk_list!(self, visit_struct_field_def, fields);
+ walk_list!(self, visit_attribute, &item.attrs);
+ return;
+ }
+ _ => {}
+ },
+ ItemKind::Union(vdata, generics) => {
if vdata.fields().is_empty() {
self.err_handler().emit_err(errors::FieldlessUnion { span: item.span });
}
+ match vdata {
+ VariantData::Struct(fields, ..) => {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ self.visit_generics(generics);
+ walk_list!(self, visit_struct_field_def, fields);
+ walk_list!(self, visit_attribute, &item.attrs);
+ return;
+ }
+ _ => {}
+ }
}
ItemKind::Const(box ConstItem { defaultness, expr: None, .. }) => {
self.check_defaultness(item.span, *defaultness);
@@ -1518,26 +1515,9 @@ pub fn check_crate(
outer_impl_trait: None,
disallow_tilde_const: None,
is_impl_trait_banned: false,
- forbidden_let_reason: Some(ForbiddenLetReason::GenericForbidden),
lint_buffer: lints,
};
visit::walk_crate(&mut validator, krate);
validator.has_proc_macro_decls
}
-
-/// Used to forbid `let` expressions in certain syntactic locations.
-#[derive(Clone, Copy, Subdiagnostic)]
-pub(crate) enum ForbiddenLetReason {
- /// `let` is not valid and the source environment is not important
- GenericForbidden,
- /// A let chain with the `||` operator
- #[note(ast_passes_not_supported_or)]
- NotSupportedOr(#[primary_span] Span),
- /// A let chain with invalid parentheses
- ///
- /// For example, `let 1 = 1 && (expr && expr)` is allowed
- /// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
- #[note(ast_passes_not_supported_parentheses)]
- NotSupportedParentheses(#[primary_span] Span),
-}
diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs
index a6f217d47..e74d94e43 100644
--- a/compiler/rustc_ast_passes/src/errors.rs
+++ b/compiler/rustc_ast_passes/src/errors.rs
@@ -5,28 +5,9 @@ use rustc_errors::AddToDiagnostic;
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_span::{symbol::Ident, Span, Symbol};
-use crate::ast_validation::ForbiddenLetReason;
use crate::fluent_generated as fluent;
#[derive(Diagnostic)]
-#[diag(ast_passes_forbidden_let)]
-#[note]
-pub struct ForbiddenLet {
- #[primary_span]
- pub span: Span,
- #[subdiagnostic]
- pub(crate) reason: ForbiddenLetReason,
-}
-
-#[derive(Diagnostic)]
-#[diag(ast_passes_forbidden_let_stable)]
-#[note]
-pub struct ForbiddenLetStable {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(Diagnostic)]
#[diag(ast_passes_keyword_lifetime)]
pub struct KeywordLifetime {
#[primary_span]
@@ -727,3 +708,30 @@ pub struct ConstraintOnNegativeBound {
#[primary_span]
pub span: Span,
}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_unnamed_field_ty)]
+pub struct InvalidUnnamedFieldTy {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub ty_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_invalid_unnamed_field)]
+pub struct InvalidUnnamedField {
+ #[primary_span]
+ pub span: Span,
+ #[label]
+ pub ident_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(ast_passes_anon_struct_or_union_not_allowed)]
+pub struct AnonStructOrUnionNotAllowed {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub struct_or_union: &'static str,
+}
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 10c9c3ef1..62dc7ae58 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -570,6 +570,7 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
gate_all!(builtin_syntax, "`builtin #` syntax is unstable");
gate_all!(explicit_tail_calls, "`become` expression is experimental");
gate_all!(generic_const_items, "generic const items are experimental");
+ gate_all!(unnamed_fields, "unnamed fields are not yet fully implemented");
if !visitor.features.negative_bounds {
for &span in spans.get(&sym::negative_bounds).iter().copied().flatten() {
@@ -577,11 +578,11 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
}
}
- // All uses of `gate_all!` below this point were added in #65742,
+ // All uses of `gate_all_legacy_dont_use!` below this point were added in #65742,
// and subsequently disabled (with the non-early gating readded).
// We emit an early future-incompatible warning for these.
// New syntax gates should go above here to get a hard error gate.
- macro_rules! gate_all {
+ macro_rules! gate_all_legacy_dont_use {
($gate:ident, $msg:literal) => {
for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
gate_feature_post!(future_incompatible; &visitor, $gate, *span, $msg);
@@ -589,13 +590,19 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) {
};
}
- gate_all!(trait_alias, "trait aliases are experimental");
- gate_all!(associated_type_bounds, "associated type bounds are unstable");
- gate_all!(return_type_notation, "return type notation is experimental");
- gate_all!(decl_macro, "`macro` is experimental");
- gate_all!(box_patterns, "box pattern syntax is experimental");
- gate_all!(exclusive_range_pattern, "exclusive range pattern syntax is experimental");
- gate_all!(try_blocks, "`try` blocks are unstable");
+ gate_all_legacy_dont_use!(trait_alias, "trait aliases are experimental");
+ gate_all_legacy_dont_use!(associated_type_bounds, "associated type bounds are unstable");
+    // Despite being a new feature, the RTN syntax `where T: Trait<Assoc(): Sized>` used to be
+    // gated under `associated_type_bounds`, which is handled right above, so RTN needs the
+    // same legacy treatment too.
+ gate_all_legacy_dont_use!(return_type_notation, "return type notation is experimental");
+ gate_all_legacy_dont_use!(decl_macro, "`macro` is experimental");
+ gate_all_legacy_dont_use!(box_patterns, "box pattern syntax is experimental");
+ gate_all_legacy_dont_use!(
+ exclusive_range_pattern,
+ "exclusive range pattern syntax is experimental"
+ );
+ gate_all_legacy_dont_use!(try_blocks, "`try` blocks are unstable");
visit::walk_crate(&mut visitor, krate);
}
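A toy sketch (hypothetical span type and diagnostics, not the real macros) of the two-tier pattern the rename makes explicit: new syntax gates produce hard errors through one macro, while the pre-existing gates listed here only get a future-incompatibility warning through a deliberately awkward name, so new gates are not added to the legacy list by accident.

    #[derive(Debug, Clone, Copy)]
    struct Span(u32, u32);

    fn hard_error(msg: &str, span: Span) {
        eprintln!("error: {msg} at {span:?}");
    }

    fn future_incompat_warning(msg: &str, span: Span) {
        eprintln!("warning (future-incompatible): {msg} at {span:?}");
    }

    macro_rules! gate_all {
        ($spans:expr, $msg:literal) => {
            for &span in $spans {
                hard_error($msg, span);
            }
        };
    }

    macro_rules! gate_all_legacy_dont_use {
        ($spans:expr, $msg:literal) => {
            for &span in $spans {
                future_incompat_warning($msg, span);
            }
        };
    }

    fn main() {
        let spans = [Span(1, 5)];
        gate_all!(&spans, "unnamed fields are not yet fully implemented");
        gate_all_legacy_dont_use!(&spans, "`try` blocks are unstable");
    }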
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index 58ce73047..8b7e91882 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -1064,6 +1064,14 @@ impl<'a> State<'a> {
}
self.pclose();
}
+ ast::TyKind::AnonStruct(fields) => {
+ self.head("struct");
+ self.print_record_struct_body(&fields, ty.span);
+ }
+ ast::TyKind::AnonUnion(fields) => {
+ self.head("union");
+ self.print_record_struct_body(&fields, ty.span);
+ }
ast::TyKind::Paren(typ) => {
self.popen();
self.print_type(typ);
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
index 39741a039..1142d4921 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -352,7 +352,7 @@ impl<'a> State<'a> {
self.end();
self.word(")");
}
- ast::ExprKind::Let(pat, scrutinee, _) => {
+ ast::ExprKind::Let(pat, scrutinee, _, _) => {
self.print_let(pat, scrutinee);
}
ast::ExprKind::If(test, blk, elseopt) => self.print_if(test, blk, elseopt.as_deref()),
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
index d27a44f12..3393f034b 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
@@ -443,7 +443,11 @@ impl<'a> State<'a> {
}
}
- fn print_record_struct_body(&mut self, fields: &[ast::FieldDef], span: rustc_span::Span) {
+ pub(crate) fn print_record_struct_body(
+ &mut self,
+ fields: &[ast::FieldDef],
+ span: rustc_span::Span,
+ ) {
self.nbsp();
self.bopen();
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
index 3592287b9..ca4b3662a 100644
--- a/compiler/rustc_attr/src/builtin.rs
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -937,6 +937,7 @@ pub fn find_deprecation(
#[derive(PartialEq, Debug, Encodable, Decodable, Copy, Clone)]
pub enum ReprAttr {
ReprInt(IntType),
+ ReprRust,
ReprC,
ReprPacked(u32),
ReprSimd,
@@ -985,6 +986,7 @@ pub fn parse_repr_attr(sess: &Session, attr: &Attribute) -> Vec<ReprAttr> {
let mut recognised = false;
if item.is_word() {
let hint = match item.name_or_empty() {
+ sym::Rust => Some(ReprRust),
sym::C => Some(ReprC),
sym::packed => Some(ReprPacked(1)),
sym::simd => Some(ReprSimd),
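A small example of the newly parsed hint, assuming a toolchain (1.74+) that accepts `#[repr(Rust)]`; the attribute just spells out the default layout explicitly and maps to the `ReprRust` variant added above.

    // Explicitly requests the default (unspecified) Rust layout.
    #[repr(Rust)]
    struct Point {
        x: i32,
        y: i32,
    }

    fn main() {
        let p = Point { x: 1, y: 2 };
        println!("{} {}", p.x, p.y);
    }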
diff --git a/compiler/rustc_borrowck/messages.ftl b/compiler/rustc_borrowck/messages.ftl
index 67fdb6717..2c7b97afa 100644
--- a/compiler/rustc_borrowck/messages.ftl
+++ b/compiler/rustc_borrowck/messages.ftl
@@ -74,9 +74,6 @@ borrowck_higher_ranked_subtype_error =
borrowck_lifetime_constraints_error =
lifetime may not live long enough
-borrowck_move_borrowed =
- cannot move out of `{$desc}` because it is borrowed
-
borrowck_move_out_place_here =
{$place} is moved here
@@ -166,6 +163,8 @@ borrowck_returned_lifetime_wrong =
borrowck_returned_ref_escaped =
returns a reference to a captured variable which escapes the closure body
+borrowck_simd_shuffle_last_const = last argument of `simd_shuffle` is required to be a `const` item
+
borrowck_suggest_create_freash_reborrow =
consider reborrowing the `Pin` instead of moving it
@@ -248,12 +247,6 @@ borrowck_var_move_by_use_in_closure =
borrowck_var_move_by_use_in_generator =
move occurs due to use in generator
-borrowck_var_move_by_use_place_in_closure =
- move occurs due to use of {$place} in closure
-
-borrowck_var_move_by_use_place_in_generator =
- move occurs due to use of {$place} in generator
-
borrowck_var_mutable_borrow_by_use_place_in_closure =
mutable borrow occurs due to use of {$place} in closure
diff --git a/compiler/rustc_borrowck/src/borrow_set.rs b/compiler/rustc_borrowck/src/borrow_set.rs
index 0b44beeb0..5248a649c 100644
--- a/compiler/rustc_borrowck/src/borrow_set.rs
+++ b/compiler/rustc_borrowck/src/borrow_set.rs
@@ -71,7 +71,7 @@ impl<'tcx> fmt::Display for BorrowData<'tcx> {
fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = match self.kind {
mir::BorrowKind::Shared => "",
- mir::BorrowKind::Shallow => "shallow ",
+ mir::BorrowKind::Fake => "fake ",
mir::BorrowKind::Mut { kind: mir::MutBorrowKind::ClosureCapture } => "uniq ",
// FIXME: differentiate `TwoPhaseBorrow`
mir::BorrowKind::Mut {
diff --git a/compiler/rustc_borrowck/src/def_use.rs b/compiler/rustc_borrowck/src/def_use.rs
index b719a610e..95db93742 100644
--- a/compiler/rustc_borrowck/src/def_use.rs
+++ b/compiler/rustc_borrowck/src/def_use.rs
@@ -49,7 +49,7 @@ pub fn categorize(context: PlaceContext) -> Option<DefUse> {
// cross suspension points so this behavior is unproblematic.
PlaceContext::MutatingUse(MutatingUseContext::Borrow) |
PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) |
- PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::FakeBorrow) |
// `PlaceMention` and `AscribeUserType` both evaluate the place, which must not
// contain dangling references.
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
index fe4a45b38..ee352e911 100644
--- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -1025,7 +1025,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
self.cannot_uniquely_borrow_by_two_closures(span, &desc_place, issued_span, None)
}
- (BorrowKind::Mut { .. }, BorrowKind::Shallow) => {
+ (BorrowKind::Mut { .. }, BorrowKind::Fake) => {
if let Some(immutable_section_description) =
self.classify_immutable_section(issued_borrow.assigned_place)
{
@@ -1117,11 +1117,10 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
)
}
- (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Shallow)
- | (
- BorrowKind::Shallow,
- BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Shallow,
- ) => unreachable!(),
+ (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Fake)
+ | (BorrowKind::Fake, BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Fake) => {
+ unreachable!()
+ }
};
if issued_spans == borrow_spans {
@@ -2130,21 +2129,27 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
/// misleading users in cases like `tests/ui/nll/borrowed-temporary-error.rs`.
    /// We could expand the analysis to suggest hoisting all of the relevant parts of
/// the users' code to make the code compile, but that could be too much.
- struct NestedStatementVisitor {
+    /// Along the way we also record `prop_expr`, so we can check whether the expression is a `FormatArguments`,
+ /// which is a special case since it's generated by the compiler.
+ struct NestedStatementVisitor<'tcx> {
span: Span,
current: usize,
found: usize,
+ prop_expr: Option<&'tcx hir::Expr<'tcx>>,
}
- impl<'tcx> Visitor<'tcx> for NestedStatementVisitor {
- fn visit_block(&mut self, block: &hir::Block<'tcx>) {
+ impl<'tcx> Visitor<'tcx> for NestedStatementVisitor<'tcx> {
+ fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
self.current += 1;
walk_block(self, block);
self.current -= 1;
}
- fn visit_expr(&mut self, expr: &hir::Expr<'tcx>) {
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
if self.span == expr.span.source_callsite() {
self.found = self.current;
+ if self.prop_expr.is_none() {
+ self.prop_expr = Some(expr);
+ }
}
walk_expr(self, expr);
}
@@ -2162,22 +2167,40 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
span: proper_span,
current: 0,
found: 0,
+ prop_expr: None,
};
visitor.visit_stmt(stmt);
+
+ let typeck_results = self.infcx.tcx.typeck(self.mir_def_id());
+ let expr_ty: Option<Ty<'_>> = visitor.prop_expr.map(|expr| typeck_results.expr_ty(expr).peel_refs());
+
+ let is_format_arguments_item =
+ if let Some(expr_ty) = expr_ty
+ && let ty::Adt(adt, _) = expr_ty.kind() {
+ self.infcx.tcx.lang_items().get(LangItem::FormatArguments) == Some(adt.did())
+ } else {
+ false
+ };
+
if visitor.found == 0
&& stmt.span.contains(proper_span)
&& let Some(p) = sm.span_to_margin(stmt.span)
&& let Ok(s) = sm.span_to_snippet(proper_span)
{
- let addition = format!("let binding = {};\n{}", s, " ".repeat(p));
- err.multipart_suggestion_verbose(
- msg,
- vec![
- (stmt.span.shrink_to_lo(), addition),
- (proper_span, "binding".to_string()),
- ],
- Applicability::MaybeIncorrect,
- );
+ if !is_format_arguments_item {
+ let addition = format!("let binding = {};\n{}", s, " ".repeat(p));
+ err.multipart_suggestion_verbose(
+ msg,
+ vec![
+ (stmt.span.shrink_to_lo(), addition),
+ (proper_span, "binding".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+                err.note("the result of `format_args!` can only be assigned directly if no placeholders in its arguments are used");
+ err.note("to learn more, visit <https://doc.rust-lang.org/std/macro.format_args.html>");
+ }
suggested = true;
break;
}
@@ -2620,7 +2643,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let loan_span = loan_spans.args_or_use();
let descr_place = self.describe_any_place(place.as_ref());
- if loan.kind == BorrowKind::Shallow {
+ if loan.kind == BorrowKind::Fake {
if let Some(section) = self.classify_immutable_section(loan.assigned_place) {
let mut err = self.cannot_mutate_in_immutable_section(
span,
@@ -2804,6 +2827,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
}
ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(_)
| ProjectionElem::Index(_) => kind,
},
place_ty.projection_ty(tcx, elem),
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
index 099e07e88..8d4028de9 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mod.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -13,7 +13,7 @@ use rustc_index::IndexSlice;
use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::{
- AggregateKind, CallSource, Constant, FakeReadCause, Local, LocalInfo, LocalKind, Location,
+ AggregateKind, CallSource, ConstOperand, FakeReadCause, Local, LocalInfo, LocalKind, Location,
Operand, Place, PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator,
TerminatorKind,
};
@@ -101,12 +101,12 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let terminator = self.body[location.block].terminator();
debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
if let TerminatorKind::Call {
- func: Operand::Constant(box Constant { literal, .. }),
+ func: Operand::Constant(box ConstOperand { const_, .. }),
args,
..
} = &terminator.kind
{
- if let ty::FnDef(id, _) = *literal.ty().kind() {
+ if let ty::FnDef(id, _) = *const_.ty().kind() {
debug!("add_moved_or_invoked_closure_note: id={:?}", id);
if Some(self.infcx.tcx.parent(id)) == self.infcx.tcx.lang_items().fn_once_trait() {
let closure = match args.first() {
@@ -242,6 +242,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
ProjectionElem::Downcast(..) if opt.including_downcast => return None,
ProjectionElem::Downcast(..) => (),
ProjectionElem::OpaqueCast(..) => (),
+ ProjectionElem::Subtype(..) => (),
ProjectionElem::Field(field, _ty) => {
// FIXME(project-rfc_2229#36): print capture precisely here.
if let Some(field) = self.is_upvar_field_projection(PlaceRef {
@@ -322,7 +323,9 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
PlaceRef { local, projection: proj_base }.ty(self.body, self.infcx.tcx)
}
ProjectionElem::Downcast(..) => place.ty(self.body, self.infcx.tcx),
- ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(*ty),
+ ProjectionElem::Subtype(ty) | ProjectionElem::OpaqueCast(ty) => {
+ PlaceTy::from_ty(*ty)
+ }
ProjectionElem::Field(_, field_type) => PlaceTy::from_ty(*field_type),
},
};
@@ -628,7 +631,7 @@ impl UseSpans<'_> {
err.subdiagnostic(match kind {
Some(kd) => match kd {
rustc_middle::mir::BorrowKind::Shared
- | rustc_middle::mir::BorrowKind::Shallow => {
+ | rustc_middle::mir::BorrowKind::Fake => {
CaptureVarKind::Immut { kind_span: capture_kind_span }
}
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
index d62541daf..8ca57383e 100644
--- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -1,9 +1,10 @@
+use hir::ExprKind;
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_hir::intravisit::Visitor;
use rustc_hir::Node;
use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, InstanceDef, Ty, TyCtxt};
use rustc_middle::{
hir::place::PlaceBase,
mir::{self, BindingForm, Local, LocalDecl, LocalInfo, LocalKind, Location},
@@ -158,6 +159,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
[
..,
ProjectionElem::Index(_)
+ | ProjectionElem::Subtype(_)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
@@ -225,17 +227,17 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
if suggest {
borrow_spans.var_subdiag(
- None,
- &mut err,
- Some(mir::BorrowKind::Mut { kind: mir::MutBorrowKind::Default }),
- |_kind, var_span| {
- let place = self.describe_any_place(access_place.as_ref());
- crate::session_diagnostics::CaptureVarCause::MutableBorrowUsePlaceClosure {
- place,
- var_span,
- }
- },
- );
+ None,
+ &mut err,
+ Some(mir::BorrowKind::Mut { kind: mir::MutBorrowKind::Default }),
+ |_kind, var_span| {
+ let place = self.describe_any_place(access_place.as_ref());
+ crate::session_diagnostics::CaptureVarCause::MutableBorrowUsePlaceClosure {
+ place,
+ var_span,
+ }
+ },
+ );
}
borrow_span
}
@@ -262,11 +264,8 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
} => {
err.span_label(span, format!("cannot {act}"));
- if let Some(span) = get_mut_span_in_struct_field(
- self.infcx.tcx,
- Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty,
- *field,
- ) {
+ let place = Place::ty_from(local, proj_base, self.body, self.infcx.tcx);
+ if let Some(span) = get_mut_span_in_struct_field(self.infcx.tcx, place.ty, *field) {
err.span_suggestion_verbose(
span,
"consider changing this to be mutable",
@@ -373,12 +372,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
err.span_label(span, format!("cannot {act}"));
}
if suggest {
- err.span_suggestion_verbose(
- local_decl.source_info.span.shrink_to_lo(),
- "consider changing this to be mutable",
- "mut ",
- Applicability::MachineApplicable,
- );
+ self.construct_mut_suggestion_for_local_binding_patterns(&mut err, local);
let tcx = self.infcx.tcx;
if let ty::Closure(id, _) = *the_place_err.ty(self.body, tcx).ty.kind() {
self.show_mutating_upvar(tcx, id.expect_local(), the_place_err, &mut err);
@@ -494,6 +488,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
),
);
+ self.suggest_using_iter_mut(&mut err);
self.suggest_make_local_mut(&mut err, local, name);
}
_ => {
@@ -713,6 +708,83 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
)
}
+ fn construct_mut_suggestion_for_local_binding_patterns(
+ &self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ local: Local,
+ ) {
+ let local_decl = &self.body.local_decls[local];
+ debug!("local_decl: {:?}", local_decl);
+ let pat_span = match *local_decl.local_info() {
+ LocalInfo::User(BindingForm::Var(mir::VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(Mutability::Not),
+ opt_ty_info: _,
+ opt_match_place: _,
+ pat_span,
+ })) => pat_span,
+ _ => local_decl.source_info.span,
+ };
+
+ struct BindingFinder {
+ span: Span,
+ hir_id: Option<hir::HirId>,
+ }
+
+ impl<'tcx> Visitor<'tcx> for BindingFinder {
+ fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
+ if let hir::StmtKind::Local(local) = s.kind {
+ if local.pat.span == self.span {
+ self.hir_id = Some(local.hir_id);
+ }
+ }
+ hir::intravisit::walk_stmt(self, s);
+ }
+ }
+
+ let hir_map = self.infcx.tcx.hir();
+ let def_id = self.body.source.def_id();
+ let hir_id = if let Some(local_def_id) = def_id.as_local()
+ && let Some(body_id) = hir_map.maybe_body_owned_by(local_def_id)
+ {
+ let body = hir_map.body(body_id);
+ let mut v = BindingFinder {
+ span: pat_span,
+ hir_id: None,
+ };
+ v.visit_body(body);
+ v.hir_id
+ } else {
+ None
+ };
+
+ // With ref-binding patterns, the mutability suggestion has to apply to
+ // the binding, not the reference (which would be a type error):
+ //
+ // `let &b = a;` -> `let &(mut b) = a;`
+ if let Some(hir_id) = hir_id
+ && let Some(hir::Node::Local(hir::Local {
+ pat: hir::Pat { kind: hir::PatKind::Ref(_, _), .. },
+ ..
+ })) = hir_map.find(hir_id)
+ && let Ok(name) = self.infcx.tcx.sess.source_map().span_to_snippet(local_decl.source_info.span)
+ {
+ err.span_suggestion(
+ pat_span,
+ "consider changing this to be mutable",
+ format!("&(mut {name})"),
+ Applicability::MachineApplicable,
+ );
+ return;
+ }
+
+ err.span_suggestion_verbose(
+ local_decl.source_info.span.shrink_to_lo(),
+ "consider changing this to be mutable",
+ "mut ",
+ Applicability::MachineApplicable,
+ );
+ }
+
// point to span of upvar making closure call require mutable borrow
fn show_mutating_upvar(
&self,
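A minimal reproduction (assumed user code, not from the patch) of the ref-binding case `construct_mut_suggestion_for_local_binding_patterns` now handles: the `mut` has to go on the binding inside the pattern, not in front of the `&`.

    fn main() {
        let a = &5i32;
        // `let mut &b = a;` is rejected; the suggestion emitted above rewrites
        // the pattern to put `mut` on the binding itself:
        let &(mut b) = a;
        b += 1;
        assert_eq!(b, 6);
    }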
@@ -781,83 +853,88 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// Attempt to search similar mutable associated items for suggestion.
// In the future, attempt in all path but initially for RHS of for_loop
- fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic) {
+ fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic, span: Span) {
use hir::{
- Expr,
- ExprKind::{Block, Call, DropTemps, Match, MethodCall},
+ BorrowKind, Expr,
+ ExprKind::{AddrOf, Block, Call, MethodCall},
};
let hir_map = self.infcx.tcx.hir();
- if let Some(body_id) = hir_map.maybe_body_owned_by(self.mir_def_id()) {
- if let Block(
- hir::Block {
- expr:
- Some(Expr {
- kind:
- DropTemps(Expr {
- kind:
- Match(
- Expr {
- kind:
- Call(
- _,
- [
- Expr {
- kind:
- MethodCall(path_segment, _, _, span),
- hir_id,
- ..
- },
- ..,
- ],
- ),
- ..
- },
- ..,
- ),
- ..
- }),
- ..
- }),
- ..
- },
- _,
- ) = hir_map.body(body_id).value.kind
- {
- let opt_suggestions = self
- .infcx
- .tcx
- .typeck(path_segment.hir_id.owner.def_id)
- .type_dependent_def_id(*hir_id)
- .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
- .map(|def_id| self.infcx.tcx.associated_items(def_id))
- .map(|assoc_items| {
- assoc_items
- .in_definition_order()
- .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx))
- .filter(|&ident| {
- let original_method_ident = path_segment.ident;
- original_method_ident != ident
- && ident
- .as_str()
- .starts_with(&original_method_ident.name.to_string())
- })
- .map(|ident| format!("{ident}()"))
- .peekable()
- });
+ struct Finder<'tcx> {
+ span: Span,
+ expr: Option<&'tcx Expr<'tcx>>,
+ }
- if let Some(mut suggestions) = opt_suggestions
- && suggestions.peek().is_some()
- {
- err.span_suggestions(
- *span,
- "use mutable method",
- suggestions,
- Applicability::MaybeIncorrect,
- );
+ impl<'tcx> Visitor<'tcx> for Finder<'tcx> {
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ if e.span == self.span && self.expr.is_none() {
+ self.expr = Some(e);
}
+ hir::intravisit::walk_expr(self, e);
}
- };
+ }
+ if let Some(body_id) = hir_map.maybe_body_owned_by(self.mir_def_id())
+ && let Block(block, _) = hir_map.body(body_id).value.kind
+ {
+            // `span` corresponds to the expression being iterated; find the `for`-loop desugared
+ // expression with that span in order to identify potential fixes when encountering a
+ // read-only iterator that should be mutable.
+ let mut v = Finder {
+ span,
+ expr: None,
+ };
+ v.visit_block(block);
+ if let Some(expr) = v.expr && let Call(_, [expr]) = expr.kind {
+ match expr.kind {
+ MethodCall(path_segment, _, _, span) => {
+ // We have `for _ in iter.read_only_iter()`, try to
+ // suggest `for _ in iter.mutable_iter()` instead.
+ let opt_suggestions = self
+ .infcx
+ .tcx
+ .typeck(path_segment.hir_id.owner.def_id)
+ .type_dependent_def_id(expr.hir_id)
+ .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+ .map(|def_id| self.infcx.tcx.associated_items(def_id))
+ .map(|assoc_items| {
+ assoc_items
+ .in_definition_order()
+ .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx))
+ .filter(|&ident| {
+ let original_method_ident = path_segment.ident;
+ original_method_ident != ident
+ && ident.as_str().starts_with(
+ &original_method_ident.name.to_string(),
+ )
+ })
+ .map(|ident| format!("{ident}()"))
+ .peekable()
+ });
+
+ if let Some(mut suggestions) = opt_suggestions
+ && suggestions.peek().is_some()
+ {
+ err.span_suggestions(
+ span,
+ "use mutable method",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ AddrOf(BorrowKind::Ref, Mutability::Not, expr) => {
+ // We have `for _ in &i`, suggest `for _ in &mut i`.
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "use a mutable iterator instead",
+ "mut ".to_string(),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ }
+ }
}
/// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
@@ -951,6 +1028,44 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
}
+ fn suggest_using_iter_mut(&self, err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>) {
+ let source = self.body.source;
+ let hir = self.infcx.tcx.hir();
+ if let InstanceDef::Item(def_id) = source.instance
+ && let Some(Node::Expr(hir::Expr { hir_id, kind, ..})) = hir.get_if_local(def_id)
+ && let ExprKind::Closure(closure) = kind && closure.movability == None
+ && let Some(Node::Expr(expr)) = hir.find_parent(*hir_id) {
+ let mut cur_expr = expr;
+ while let ExprKind::MethodCall(path_segment, recv, _, _) = cur_expr.kind {
+ if path_segment.ident.name == sym::iter {
+                    // check that the type providing `iter` also has an `iter_mut` method
+ let res = self
+ .infcx
+ .tcx
+ .typeck(path_segment.hir_id.owner.def_id)
+ .type_dependent_def_id(cur_expr.hir_id)
+ .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+ .map(|def_id| self.infcx.tcx.associated_items(def_id))
+ .map(|assoc_items| {
+ assoc_items.filter_by_name_unhygienic(sym::iter_mut).peekable()
+ });
+
+ if let Some(mut res) = res && res.peek().is_some() {
+ err.span_suggestion_verbose(
+ path_segment.ident.span,
+ "you may want to use `iter_mut` here",
+ "iter_mut",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ break;
+ } else {
+ cur_expr = recv;
+ }
+ }
+ }
+ }
+
fn suggest_make_local_mut(
&self,
err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
@@ -1003,9 +1118,10 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
match opt_assignment_rhs_span.and_then(|s| s.desugaring_kind()) {
// on for loops, RHS points to the iterator part
Some(DesugaringKind::ForLoop) => {
- self.suggest_similar_mut_method_for_for_loop(err);
+ let span = opt_assignment_rhs_span.unwrap();
+ self.suggest_similar_mut_method_for_for_loop(err, span);
err.span_label(
- opt_assignment_rhs_span.unwrap(),
+ span,
format!("this iterator yields `{pointer_sigil}` {pointer_desc}s",),
);
None
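A user-level example (assumed, not from the patch) of what the new for-loop suggestions point at: a read-only iterator over data the loop body needs to mutate.

    fn main() {
        let mut v = vec![1, 2, 3];
        // With `v.iter()` (or `&v`) the loop yields `&i32` and `*x += 1` fails;
        // the diagnostics above suggest `iter_mut()` (or `&mut v`) instead.
        for x in v.iter_mut() {
            *x += 1;
        }
        assert_eq!(v, vec![2, 3, 4]);
    }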
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
index 2ea399789..27072a60f 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
@@ -245,7 +245,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
let Trait(PolyTraitRef { trait_ref, span: trait_span, .. }, _) = bound else { return; };
diag.span_note(
*trait_span,
- format!("due to current limitations in the borrow checker, this implies a `'static` lifetime")
+ "due to current limitations in the borrow checker, this implies a `'static` lifetime"
);
let Some(generics_fn) = hir.get_generics(self.body.source.def_id().expect_local()) else { return; };
let Def(_, trait_res_defid) = trait_ref.path.res else { return; };
@@ -277,7 +277,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if suggestions.len() > 0 {
suggestions.dedup();
diag.multipart_suggestion_verbose(
- format!("consider restricting the type parameter to the `'static` lifetime"),
+ "consider restricting the type parameter to the `'static` lifetime",
suggestions,
Applicability::MaybeIncorrect,
);
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
index 337af89b2..55d581b3a 100644
--- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -27,7 +27,7 @@ pub(crate) struct RegionName {
/// This helps to print the right kinds of diagnostics.
#[derive(Debug, Clone)]
pub(crate) enum RegionNameSource {
- /// A bound (not free) region that was substituted at the def site (not an HRTB).
+ /// A bound (not free) region that was instantiated at the def site (not an HRTB).
NamedEarlyBoundRegion(Span),
/// A free region that the user has a name (`'a`) for.
NamedFreeRegion(Span),
@@ -302,7 +302,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
if free_region.bound_region.is_named() {
// A named region that is actually named.
Some(RegionName { name, source: RegionNameSource::NamedFreeRegion(span) })
- } else if let hir::IsAsync::Async = tcx.asyncness(self.mir_hir_id().owner) {
+ } else if tcx.asyncness(self.mir_hir_id().owner).is_async() {
// If we spuriously thought that the region is named, we should let the
// system generate a true name for error messages. Currently this can
// happen if we have an elided name in an async fn for example: the
@@ -354,7 +354,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
})
}
- ty::BoundRegionKind::BrAnon(..) => None,
+ ty::BoundRegionKind::BrAnon => None,
},
ty::ReLateBound(..)
@@ -442,8 +442,8 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
span: Span,
counter: usize,
) -> RegionNameHighlight {
- let mut highlight = RegionHighlightMode::new(self.infcx.tcx);
- highlight.highlighting_region_vid(needle_fr, counter);
+ let mut highlight = RegionHighlightMode::default();
+ highlight.highlighting_region_vid(self.infcx.tcx, needle_fr, counter);
let type_name =
self.infcx.extract_inference_diagnostics_data(ty.into(), Some(highlight)).name;
@@ -516,7 +516,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
// be the same as those of the ADT.
// FIXME: We should be able to do something similar to
// match_adt_and_segment in this case.
- Res::Def(DefKind::TyAlias { .. }, _) => (),
+ Res::Def(DefKind::TyAlias, _) => (),
_ => {
if let Some(last_segment) = path.segments.last() {
if let Some(highlight) = self.match_adt_and_segment(
@@ -619,7 +619,7 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
// programs, so we need to use delay_span_bug here. See #82126.
self.infcx.tcx.sess.delay_span_bug(
hir_arg.span(),
- format!("unmatched subst and hir arg: found {kind:?} vs {hir_arg:?}"),
+ format!("unmatched arg and hir arg: found {kind:?} vs {hir_arg:?}"),
);
}
}
@@ -804,8 +804,8 @@ impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
return None;
}
- let mut highlight = RegionHighlightMode::new(tcx);
- highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
+ let mut highlight = RegionHighlightMode::default();
+ highlight.highlighting_region_vid(tcx, fr, *self.next_region_name.try_borrow().unwrap());
let type_name =
self.infcx.extract_inference_diagnostics_data(yield_ty.into(), Some(highlight)).name;
diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs
index df5e383ad..2faf1a529 100644
--- a/compiler/rustc_borrowck/src/invalidation.rs
+++ b/compiler/rustc_borrowck/src/invalidation.rs
@@ -159,7 +159,9 @@ impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
self.mutate_place(location, *resume_arg, Deep);
}
- TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+ TerminatorKind::UnwindResume
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop => {
// Invalidate all borrows of local places
let borrow_set = self.borrow_set;
let start = self.location_table.start_index(location);
@@ -200,7 +202,7 @@ impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
}
}
TerminatorKind::Goto { target: _ }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
| TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
@@ -251,8 +253,8 @@ impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
match rvalue {
&Rvalue::Ref(_ /*rgn*/, bk, place) => {
let access_kind = match bk {
- BorrowKind::Shallow => {
- (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+ BorrowKind::Fake => {
+ (Shallow(Some(ArtificialField::FakeBorrow)), Read(ReadKind::Borrow(bk)))
}
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Mut { .. } => {
@@ -374,8 +376,8 @@ impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
// have already taken the reservation
}
- (Read(_), BorrowKind::Shallow | BorrowKind::Shared)
- | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => {
+ (Read(_), BorrowKind::Fake | BorrowKind::Shared)
+ | (Read(ReadKind::Borrow(BorrowKind::Fake)), BorrowKind::Mut { .. }) => {
// Reads don't invalidate shared or shallow borrows
}
@@ -420,7 +422,7 @@ impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
// only mutable borrows should be 2-phase
assert!(match borrow.kind {
- BorrowKind::Shared | BorrowKind::Shallow => false,
+ BorrowKind::Shared | BorrowKind::Fake => false,
BorrowKind::Mut { .. } => true,
});
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
index efe525c22..1d17df8b7 100644
--- a/compiler/rustc_borrowck/src/lib.rs
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -11,7 +11,7 @@
#![feature(trusted_step)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_middle;
@@ -603,7 +603,7 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
fn visit_statement_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
flow_state: &Flows<'cx, 'tcx>,
stmt: &'cx Statement<'tcx>,
location: Location,
@@ -673,7 +673,7 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
fn visit_terminator_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
flow_state: &Flows<'cx, 'tcx>,
term: &'cx Terminator<'tcx>,
loc: Location,
@@ -770,9 +770,9 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
}
TerminatorKind::Goto { target: _ }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Unreachable
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::GeneratorDrop
| TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
@@ -784,7 +784,7 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
fn visit_terminator_after_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
flow_state: &Flows<'cx, 'tcx>,
term: &'cx Terminator<'tcx>,
loc: Location,
@@ -803,7 +803,9 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
}
}
- TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+ TerminatorKind::UnwindResume
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop => {
// Returning from the function implicitly kills storage for all locals and statics.
// Often, the storage will already have been killed by an explicit
// StorageDead, but we don't always emit those (notably on unwind paths),
@@ -815,7 +817,7 @@ impl<'cx, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx, R> for MirBorro
}
}
- TerminatorKind::Terminate
+ TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Assert { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::Drop { .. }
@@ -835,7 +837,7 @@ use self::ReadOrWrite::{Activation, Read, Reservation, Write};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ArtificialField {
ArrayLength,
- ShallowBorrow,
+ FakeBorrow,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -1074,18 +1076,18 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
Control::Continue
}
- (Read(_), BorrowKind::Shared | BorrowKind::Shallow)
- | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => {
+ (Read(_), BorrowKind::Shared | BorrowKind::Fake)
+ | (Read(ReadKind::Borrow(BorrowKind::Fake)), BorrowKind::Mut { .. }) => {
Control::Continue
}
- (Reservation(_), BorrowKind::Shallow | BorrowKind::Shared) => {
+ (Reservation(_), BorrowKind::Fake | BorrowKind::Shared) => {
// This used to be a future compatibility warning (to be
// disallowed on NLL). See rust-lang/rust#56254
Control::Continue
}
- (Write(WriteKind::Move), BorrowKind::Shallow) => {
+ (Write(WriteKind::Move), BorrowKind::Fake) => {
// Handled by initialization checks.
Control::Continue
}
@@ -1193,8 +1195,8 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
match rvalue {
&Rvalue::Ref(_ /*rgn*/, bk, place) => {
let access_kind = match bk {
- BorrowKind::Shallow => {
- (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+ BorrowKind::Fake => {
+ (Shallow(Some(ArtificialField::FakeBorrow)), Read(ReadKind::Borrow(bk)))
}
BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
BorrowKind::Mut { .. } => {
@@ -1215,7 +1217,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
flow_state,
);
- let action = if bk == BorrowKind::Shallow {
+ let action = if bk == BorrowKind::Fake {
InitializationRequiringAction::MatchOn
} else {
InitializationRequiringAction::Borrow
@@ -1567,7 +1569,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
// only mutable borrows should be 2-phase
assert!(match borrow.kind {
- BorrowKind::Shared | BorrowKind::Shallow => false,
+ BorrowKind::Shared | BorrowKind::Fake => false,
BorrowKind::Mut { .. } => true,
});
@@ -1801,6 +1803,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
for (place_base, elem) in place.iter_projections().rev() {
match elem {
ProjectionElem::Index(_/*operand*/) |
+ ProjectionElem::Subtype(_) |
ProjectionElem::OpaqueCast(_) |
ProjectionElem::ConstantIndex { .. } |
// assigning to P[i] requires P to be valid.
@@ -2000,14 +2003,14 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
| WriteKind::Replace
| WriteKind::StorageDeadOrDrop
| WriteKind::MutableBorrow(BorrowKind::Shared)
- | WriteKind::MutableBorrow(BorrowKind::Shallow),
+ | WriteKind::MutableBorrow(BorrowKind::Fake),
)
| Write(
WriteKind::Move
| WriteKind::Replace
| WriteKind::StorageDeadOrDrop
| WriteKind::MutableBorrow(BorrowKind::Shared)
- | WriteKind::MutableBorrow(BorrowKind::Shallow),
+ | WriteKind::MutableBorrow(BorrowKind::Fake),
) => {
if self.is_mutable(place.as_ref(), is_local_mutation_allowed).is_err()
&& !self.has_buffered_errors()
@@ -2031,7 +2034,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
return false;
}
Read(
- ReadKind::Borrow(BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Shallow)
+ ReadKind::Borrow(BorrowKind::Mut { .. } | BorrowKind::Shared | BorrowKind::Fake)
| ReadKind::Copy,
) => {
// Access authorized
@@ -2189,6 +2192,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(..)
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Downcast(..) => {
let upvar_field_projection = self.is_upvar_field_projection(place);
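The `BorrowKind::Shallow` to `BorrowKind::Fake` rename threaded through invalidation.rs and lib.rs above (together with `ArtificialField::FakeBorrow` and the `FakeBorrow` place context) is purely a renaming: these are the artificial borrows MIR building places on a match scrutinee so that guards cannot mutate it. As a minimal illustration of the behaviour they enforce, not part of the patch, this program is still rejected by borrowck:

```rust
fn main() {
    let mut x = Some(1);
    match x {
        // The guard mutates the scrutinee while it is held by a fake borrow;
        // borrowck rejects this (E0510, cannot assign `x` in a match guard).
        Some(_) if { x = None; false } => {}
        _ => {}
    }
}
```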
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
index 679a19710..3f60f5aca 100644
--- a/compiler/rustc_borrowck/src/nll.rs
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -10,6 +10,7 @@ use rustc_middle::mir::{
Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location, Promoted,
START_BLOCK,
};
+use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, OpaqueHiddenType, TyCtxt};
use rustc_span::symbol::sym;
use std::env;
@@ -441,7 +442,10 @@ fn for_each_region_constraint<'tcx>(
let subject = match req.subject {
ClosureOutlivesSubject::Region(subject) => format!("{subject:?}"),
ClosureOutlivesSubject::Ty(ty) => {
- format!("{:?}", ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid)))
+ with_no_trimmed_paths!(format!(
+ "{}",
+ ty.instantiate(tcx, |vid| ty::Region::new_var(tcx, vid))
+ ))
}
};
with_msg(format!("where {}: {:?}", subject, req.outlived_free_region,))?;
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
index c02f6f3b6..777ebf0d4 100644
--- a/compiler/rustc_borrowck/src/places_conflict.rs
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -204,7 +204,7 @@ fn place_components_conflict<'tcx>(
match (elem, &base_ty.kind(), access) {
(_, _, Shallow(Some(ArtificialField::ArrayLength)))
- | (_, _, Shallow(Some(ArtificialField::ShallowBorrow))) => {
+ | (_, _, Shallow(Some(ArtificialField::FakeBorrow))) => {
// The array length is like additional fields on the
// type; it does not overlap any existing data there.
// Furthermore, it cannot actually be a prefix of any
@@ -249,6 +249,7 @@ fn place_components_conflict<'tcx>(
| (ProjectionElem::ConstantIndex { .. }, _, _)
| (ProjectionElem::Subslice { .. }, _, _)
| (ProjectionElem::OpaqueCast { .. }, _, _)
+ | (ProjectionElem::Subtype(_), _, _)
| (ProjectionElem::Downcast { .. }, _, _) => {
// Recursive case. This can still be disjoint on a
// further iteration if this is a shallow access and
@@ -272,10 +273,10 @@ fn place_components_conflict<'tcx>(
// If the second example, where we did, then we still know
// that the borrow can access a *part* of our place that
// our access cares about, so we still have a conflict.
- if borrow_kind == BorrowKind::Shallow
+ if borrow_kind == BorrowKind::Fake
&& borrow_place.projection.len() < access_place.projection.len()
{
- debug!("borrow_conflicts_with_place: shallow borrow");
+ debug!("borrow_conflicts_with_place: fake borrow");
false
} else {
debug!("borrow_conflicts_with_place: full borrow, CONFLICT");
@@ -508,6 +509,7 @@ fn place_projection_conflict<'tcx>(
| ProjectionElem::Field(..)
| ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subtype(_)
| ProjectionElem::OpaqueCast { .. }
| ProjectionElem::Subslice { .. }
| ProjectionElem::Downcast(..),
diff --git a/compiler/rustc_borrowck/src/prefixes.rs b/compiler/rustc_borrowck/src/prefixes.rs
index 6f2813498..e9c9709bd 100644
--- a/compiler/rustc_borrowck/src/prefixes.rs
+++ b/compiler/rustc_borrowck/src/prefixes.rs
@@ -89,6 +89,9 @@ impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
cursor = cursor_base;
continue 'cursor;
}
+ ProjectionElem::Subtype(..) => {
+ panic!("Subtype projection is not allowed before borrow check")
+ }
ProjectionElem::Deref => {
// (handled below)
}
diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs
index b8cd94e54..852935676 100644
--- a/compiler/rustc_borrowck/src/region_infer/mod.rs
+++ b/compiler/rustc_borrowck/src/region_infer/mod.rs
@@ -2249,7 +2249,14 @@ impl<'tcx> RegionInferenceContext<'tcx> {
}
pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
- self.universe_causes[&universe].clone()
+ // Query canonicalization can create local superuniverses (for example in
+ // `InferCtx::query_response_instantiation_guess`), but they don't have an associated
+ // `UniverseInfo` explaining why they were created.
+ // This can cause ICEs if these causes are accessed in diagnostics, for example in issue
+ // #114907 where this happens via liveness and dropck outlives results.
+ // Therefore, we return a default value in case that happens, which should at worst emit a
+ // suboptimal error, instead of the ICE.
+ self.universe_causes.get(&universe).cloned().unwrap_or_else(|| UniverseInfo::other())
}
/// Tries to find the terminator of the loop in which the region 'r' resides.
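The `universe_info` hunk above replaces a direct index into `universe_causes` with a defensive lookup that falls back to a default cause. A tiny standalone sketch of the same pattern, using a hypothetical `UniverseInfo` stand-in rather than rustc's real types:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, Default)]
struct UniverseInfo(&'static str);

fn universe_info(causes: &HashMap<u32, UniverseInfo>, universe: u32) -> UniverseInfo {
    // `causes[&universe]` would panic for universes created without a registered
    // cause; return a default value instead of aborting.
    causes.get(&universe).cloned().unwrap_or_default()
}

fn main() {
    let causes = HashMap::from([(0, UniverseInfo("root"))]);
    assert_eq!(universe_info(&causes, 5).0, "");
}
```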
diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs
index 4c69ea843..5d6f5cc89 100644
--- a/compiler/rustc_borrowck/src/renumber.rs
+++ b/compiler/rustc_borrowck/src/renumber.rs
@@ -4,11 +4,10 @@ use crate::BorrowckInferCtxt;
use rustc_index::IndexSlice;
use rustc_infer::infer::NllRegionVariableOrigin;
use rustc_middle::mir::visit::{MutVisitor, TyContext};
-use rustc_middle::mir::Constant;
-use rustc_middle::mir::{Body, Location, Promoted};
+use rustc_middle::mir::{Body, ConstOperand, Location, Promoted};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc_span::{Span, Symbol};
+use rustc_span::Symbol;
/// Replaces all free regions appearing in the MIR with fresh
/// inference variables, returning the number of variables created.
@@ -30,20 +29,14 @@ pub fn renumber_mir<'tcx>(
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
-pub(crate) enum BoundRegionInfo {
- Name(Symbol),
- Span(Span),
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) enum RegionCtxt {
Location(Location),
TyContext(TyContext),
Free(Symbol),
- Bound(BoundRegionInfo),
- LateBound(BoundRegionInfo),
+ Bound(Symbol),
+ LateBound(Symbol),
Existential(Option<Symbol>),
- Placeholder(BoundRegionInfo),
+ Placeholder(Symbol),
Unknown,
}
@@ -117,9 +110,9 @@ impl<'a, 'tcx> MutVisitor<'tcx> for RegionRenumberer<'a, 'tcx> {
}
#[instrument(skip(self), level = "debug")]
- fn visit_constant(&mut self, constant: &mut Constant<'tcx>, location: Location) {
- let literal = constant.literal;
- constant.literal = self.renumber_regions(literal, || RegionCtxt::Location(location));
+ fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
+ let const_ = constant.const_;
+ constant.const_ = self.renumber_regions(const_, || RegionCtxt::Location(location));
debug!("constant: {:#?}", constant);
}
}
diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs
index d1d8cfa74..ca3ccf439 100644
--- a/compiler/rustc_borrowck/src/session_diagnostics.rs
+++ b/compiler/rustc_borrowck/src/session_diagnostics.rs
@@ -452,3 +452,10 @@ pub(crate) enum TypeNoCopy<'a, 'tcx> {
#[note(borrowck_ty_no_impl_copy)]
Note { is_partial_move: bool, ty: Ty<'tcx>, place: &'a str },
}
+
+#[derive(Diagnostic)]
+#[diag(borrowck_simd_shuffle_last_const)]
+pub(crate) struct SimdShuffleLastConst {
+ #[primary_span]
+ pub span: Span,
+}
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs
index 16f5e68a0..b7adc314f 100644
--- a/compiler/rustc_borrowck/src/type_check/canonical.rs
+++ b/compiler/rustc_borrowck/src/type_check/canonical.rs
@@ -9,7 +9,7 @@ use rustc_span::Span;
use rustc_trait_selection::traits::query::type_op::{self, TypeOpOutput};
use rustc_trait_selection::traits::ObligationCause;
-use crate::diagnostics::{ToUniverseInfo, UniverseInfo};
+use crate::diagnostics::ToUniverseInfo;
use super::{Locations, NormalizeLocation, TypeChecker};
@@ -46,13 +46,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
self.push_region_constraints(locations, category, data);
}
+ // If the query has created new universes and errors are going to be emitted, register the
+ // cause of these new universes for improved diagnostics.
let universe = self.infcx.universe();
-
- if old_universe != universe {
- let universe_info = match error_info {
- Some(error_info) => error_info.to_universe_info(old_universe),
- None => UniverseInfo::other(),
- };
+ if old_universe != universe && let Some(error_info) = error_info {
+ let universe_info = error_info.to_universe_info(old_universe);
for u in (old_universe + 1)..=universe {
self.borrowck_context.constraints.universe_causes.insert(u, universe_info.clone());
}
@@ -69,15 +67,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>>,
{
- let old_universe = self.infcx.universe();
-
let (instantiated, _) =
self.infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
-
- for u in (old_universe + 1)..=self.infcx.universe() {
- self.borrowck_context.constraints.universe_causes.insert(u, UniverseInfo::other());
- }
-
instantiated
}
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index 50d875dfa..1f383e533 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -50,7 +50,7 @@ use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
use rustc_mir_dataflow::move_paths::MoveData;
use rustc_mir_dataflow::ResultsCursor;
-use crate::session_diagnostics::MoveUnsized;
+use crate::session_diagnostics::{MoveUnsized, SimdShuffleLastConst};
use crate::{
borrow_set::BorrowSet,
constraints::{OutlivesConstraint, OutlivesConstraintSet},
@@ -163,10 +163,6 @@ pub(crate) fn type_check<'mir, 'tcx>(
debug!(?normalized_inputs_and_output);
- for u in ty::UniverseIndex::ROOT..=infcx.universe() {
- constraints.universe_causes.insert(u, UniverseInfo::other());
- }
-
let mut borrowck_context = BorrowCheckContext {
universal_regions,
location_table,
@@ -306,11 +302,11 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
self.sanitize_place(place, location, context);
}
- fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
debug!(?constant, ?location, "visit_constant");
self.super_constant(constant, location);
- let ty = self.sanitize_type(constant, constant.literal.ty());
+ let ty = self.sanitize_type(constant, constant.const_.ty());
self.cx.infcx.tcx.for_each_free_region(&ty, |live_region| {
let live_region_vid =
@@ -332,7 +328,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
if let Some(annotation_index) = constant.user_ty {
if let Err(terr) = self.cx.relate_type_and_user_type(
- constant.literal.ty(),
+ constant.const_.ty(),
ty::Variance::Invariant,
&UserTypeProjection { base: annotation_index, projs: vec![] },
locations,
@@ -344,20 +340,20 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
constant,
"bad constant user type {:?} vs {:?}: {:?}",
annotation,
- constant.literal.ty(),
+ constant.const_.ty(),
terr,
);
}
} else {
let tcx = self.tcx();
- let maybe_uneval = match constant.literal {
- ConstantKind::Ty(ct) => match ct.kind() {
+ let maybe_uneval = match constant.const_ {
+ Const::Ty(ct) => match ct.kind() {
ty::ConstKind::Unevaluated(_) => {
- bug!("should not encounter unevaluated ConstantKind::Ty here, got {:?}", ct)
+ bug!("should not encounter unevaluated Const::Ty here, got {:?}", ct)
}
_ => None,
},
- ConstantKind::Unevaluated(uv, _) => Some(uv),
+ Const::Unevaluated(uv, _) => Some(uv),
_ => None,
};
@@ -388,7 +384,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
check_err(self, promoted_body, ty, promoted_ty);
} else {
self.cx.ascribe_user_type(
- constant.literal.ty(),
+ constant.const_.ty(),
UserType::TypeOf(uv.def, UserArgs { args: uv.args, user_self_ty: None }),
locations.span(&self.cx.body),
);
@@ -396,7 +392,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
} else if let Some(static_def_id) = constant.check_static_ptr(tcx) {
let unnormalized_ty = tcx.type_of(static_def_id).instantiate_identity();
let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
- let literal_ty = constant.literal.ty().builtin_deref(true).unwrap().ty;
+ let literal_ty = constant.const_.ty().builtin_deref(true).unwrap().ty;
if let Err(terr) = self.cx.eq_types(
literal_ty,
@@ -408,7 +404,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
}
}
- if let ty::FnDef(def_id, args) = *constant.literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *constant.const_.ty().kind() {
let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args);
self.cx.normalize_and_prove_instantiated_predicates(
def_id,
@@ -720,6 +716,9 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
}
PlaceTy::from_ty(fty)
}
+ ProjectionElem::Subtype(_) => {
+ bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+ }
ProjectionElem::OpaqueCast(ty) => {
let ty = self.sanitize_type(place, ty);
let ty = self.cx.normalize(ty, location);
@@ -749,7 +748,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
PlaceContext::MutatingUse(_) => ty::Invariant,
PlaceContext::NonUse(StorageDead | StorageLive | VarDebugInfo) => ty::Invariant,
PlaceContext::NonMutatingUse(
- Inspect | Copy | Move | PlaceMention | SharedBorrow | ShallowBorrow | AddressOf
+ Inspect | Copy | Move | PlaceMention | SharedBorrow | FakeBorrow | AddressOf
| Projection,
) => ty::Covariant,
PlaceContext::NonUse(AscribeUserTy(variance)) => variance,
@@ -1011,7 +1010,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
pub(super) fn register_predefined_opaques_in_new_solver(&mut self) {
- // OK to use the identity substitutions for each opaque type key, since
+ // OK to use the identity arguments for each opaque type key, since
// we remap opaques from HIR typeck back to their definition params.
let opaques: Vec<_> = self
.infcx
@@ -1333,8 +1332,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
debug!("terminator kind: {:?}", term.kind);
match &term.kind {
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::GeneratorDrop
| TerminatorKind::Unreachable
@@ -1371,14 +1370,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
};
let (sig, map) = tcx.replace_late_bound_regions(sig, |br| {
- use crate::renumber::{BoundRegionInfo, RegionCtxt};
+ use crate::renumber::RegionCtxt;
let region_ctxt_fn = || {
let reg_info = match br.kind {
- ty::BoundRegionKind::BrAnon(Some(span)) => BoundRegionInfo::Span(span),
- ty::BoundRegionKind::BrAnon(..) => BoundRegionInfo::Name(sym::anon),
- ty::BoundRegionKind::BrNamed(_, name) => BoundRegionInfo::Name(name),
- ty::BoundRegionKind::BrEnv => BoundRegionInfo::Name(sym::env),
+ ty::BoundRegionKind::BrAnon => sym::anon,
+ ty::BoundRegionKind::BrNamed(_, name) => name,
+ ty::BoundRegionKind::BrEnv => sym::env,
};
RegionCtxt::LateBound(reg_info)
@@ -1430,7 +1428,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
.add_element(region_vid, term_location);
}
- self.check_call_inputs(body, term, &sig, args, term_location, *call_source);
+ self.check_call_inputs(body, term, func, &sig, args, term_location, *call_source);
}
TerminatorKind::Assert { cond, msg, .. } => {
self.check_operand(cond, term_location);
@@ -1550,25 +1548,36 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
}
+ #[instrument(level = "debug", skip(self, body, term, func, term_location, call_source))]
fn check_call_inputs(
&mut self,
body: &Body<'tcx>,
term: &Terminator<'tcx>,
+ func: &Operand<'tcx>,
sig: &ty::FnSig<'tcx>,
args: &[Operand<'tcx>],
term_location: Location,
call_source: CallSource,
) {
- debug!("check_call_inputs({:?}, {:?})", sig, args);
if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) {
span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
}
- let func_ty = if let TerminatorKind::Call { func, .. } = &term.kind {
- Some(func.ty(body, self.infcx.tcx))
- } else {
- None
- };
+ let func_ty = func.ty(body, self.infcx.tcx);
+ if let ty::FnDef(def_id, _) = *func_ty.kind() {
+ if self.tcx().is_intrinsic(def_id) {
+ match self.tcx().item_name(def_id) {
+ sym::simd_shuffle => {
+ if !matches!(args[2], Operand::Constant(_)) {
+ self.tcx()
+ .sess
+ .emit_err(SimdShuffleLastConst { span: term.source_info.span });
+ }
+ }
+ _ => {}
+ }
+ }
+ }
debug!(?func_ty);
for (n, (fn_arg, op_arg)) in iter::zip(sig.inputs(), args).enumerate() {
@@ -1576,7 +1585,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let op_arg_ty = self.normalize(op_arg_ty, term_location);
let category = if call_source.from_hir_call() {
- ConstraintCategory::CallArgument(self.infcx.tcx.erase_regions(func_ty))
+ ConstraintCategory::CallArgument(Some(self.infcx.tcx.erase_regions(func_ty)))
} else {
ConstraintCategory::Boring
};
@@ -1608,12 +1617,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
self.assert_iscleanup(body, block_data, *target, is_cleanup);
}
}
- TerminatorKind::Resume => {
+ TerminatorKind::UnwindResume => {
if !is_cleanup {
span_mirbug!(self, block_data, "resume on non-cleanup block!")
}
}
- TerminatorKind::Terminate => {
+ TerminatorKind::UnwindTerminate(_) => {
if !is_cleanup {
span_mirbug!(self, block_data, "abort on non-cleanup block!")
}
@@ -1697,7 +1706,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
span_mirbug!(self, ctxt, "unwind on cleanup block")
}
}
- UnwindAction::Unreachable | UnwindAction::Terminate => (),
+ UnwindAction::Unreachable | UnwindAction::Terminate(_) => (),
}
}
@@ -1794,9 +1803,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
debug!(?op, ?location, "check_operand");
if let Operand::Constant(constant) = op {
- let maybe_uneval = match constant.literal {
- ConstantKind::Val(..) | ConstantKind::Ty(_) => None,
- ConstantKind::Unevaluated(uv, _) => Some(uv),
+ let maybe_uneval = match constant.const_ {
+ Const::Val(..) | Const::Ty(_) => None,
+ Const::Unevaluated(uv, _) => Some(uv),
};
if let Some(uv) = maybe_uneval {
@@ -2557,6 +2566,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
| ProjectionElem::Subslice { .. } => {
// other field access
}
+ ProjectionElem::Subtype(_) => {
+ bug!("ProjectionElem::Subtype shouldn't exist in borrowck")
+ }
}
}
}
diff --git a/compiler/rustc_borrowck/src/type_check/relate_tys.rs b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
index e0c629562..c1f82e19c 100644
--- a/compiler/rustc_borrowck/src/type_check/relate_tys.rs
+++ b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
@@ -11,7 +11,7 @@ use rustc_span::{Span, Symbol};
use crate::constraints::OutlivesConstraint;
use crate::diagnostics::UniverseInfo;
-use crate::renumber::{BoundRegionInfo, RegionCtxt};
+use crate::renumber::RegionCtxt;
use crate::type_check::{InstantiateOpaqueType, Locations, TypeChecker};
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
@@ -126,10 +126,9 @@ impl<'tcx> TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, 'tcx>
.placeholder_region(self.type_checker.infcx, placeholder);
let reg_info = match placeholder.bound.kind {
- ty::BoundRegionKind::BrAnon(Some(span)) => BoundRegionInfo::Span(span),
- ty::BoundRegionKind::BrAnon(..) => BoundRegionInfo::Name(sym::anon),
- ty::BoundRegionKind::BrNamed(_, name) => BoundRegionInfo::Name(name),
- ty::BoundRegionKind::BrEnv => BoundRegionInfo::Name(sym::env),
+ ty::BoundRegionKind::BrAnon => sym::anon,
+ ty::BoundRegionKind::BrNamed(_, name) => name,
+ ty::BoundRegionKind::BrEnv => sym::env,
};
if cfg!(debug_assertions) {
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
index 56945f43f..af437f36b 100644
--- a/compiler/rustc_borrowck/src/universal_regions.rs
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -21,13 +21,14 @@ use rustc_hir::BodyOwnerKind;
use rustc_index::IndexVec;
use rustc_infer::infer::NllRegionVariableOrigin;
use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, InlineConstArgs, InlineConstArgsParts, RegionVid, Ty, TyCtxt};
use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_span::symbol::{kw, sym};
use rustc_span::Symbol;
use std::iter;
-use crate::renumber::{BoundRegionInfo, RegionCtxt};
+use crate::renumber::RegionCtxt;
use crate::BorrowckInferCtxt;
#[derive(Debug)]
@@ -332,10 +333,16 @@ impl<'tcx> UniversalRegions<'tcx> {
pub(crate) fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut Diagnostic) {
match self.defining_ty {
DefiningTy::Closure(def_id, args) => {
+ let v = with_no_trimmed_paths!(
+ args[tcx.generics_of(def_id).parent_count..]
+ .iter()
+ .map(|arg| arg.to_string())
+ .collect::<Vec<_>>()
+ );
err.note(format!(
- "defining type: {} with closure args {:#?}",
+ "defining type: {} with closure args [\n {},\n]",
tcx.def_path_str_with_args(def_id, args),
- &args[tcx.generics_of(def_id).parent_count..],
+ v.join(",\n "),
));
// FIXME: It'd be nice to print the late-bound regions
@@ -348,10 +355,16 @@ impl<'tcx> UniversalRegions<'tcx> {
});
}
DefiningTy::Generator(def_id, args, _) => {
+ let v = with_no_trimmed_paths!(
+ args[tcx.generics_of(def_id).parent_count..]
+ .iter()
+ .map(|arg| arg.to_string())
+ .collect::<Vec<_>>()
+ );
err.note(format!(
- "defining type: {} with generator args {:#?}",
+ "defining type: {} with generator args [\n {},\n]",
tcx.def_path_str_with_args(def_id, args),
- &args[tcx.generics_of(def_id).parent_count..],
+ v.join(",\n "),
));
// FIXME: As above, we'd like to print out the region
@@ -433,9 +446,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
if !indices.indices.contains_key(&r) {
let region_vid = {
let name = r.get_name_or_anon();
- self.infcx.next_nll_region_var(FR, || {
- RegionCtxt::LateBound(BoundRegionInfo::Name(name))
- })
+ self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
debug!(?region_vid);
@@ -467,9 +478,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
if !indices.indices.contains_key(&r) {
let region_vid = {
let name = r.get_name_or_anon();
- self.infcx.next_nll_region_var(FR, || {
- RegionCtxt::LateBound(BoundRegionInfo::Name(name))
- })
+ self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
debug!(?region_vid);
@@ -567,7 +576,7 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
}
}
- BodyOwnerKind::Const | BodyOwnerKind::Static(..) => {
+ BodyOwnerKind::Const { .. } | BodyOwnerKind::Static(..) => {
let identity_args = GenericArgs::identity_for_item(tcx, typeck_root_def_id);
if self.mir_def.to_def_id() == typeck_root_def_id {
let args =
@@ -630,10 +639,9 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
};
let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
- let subst_mapping =
- iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
+ let arg_mapping = iter::zip(identity_args.regions(), fr_args.regions().map(|r| r.as_var()));
- UniversalRegionIndices { indices: global_mapping.chain(subst_mapping).collect(), fr_static }
+ UniversalRegionIndices { indices: global_mapping.chain(arg_mapping).collect(), fr_static }
}
fn compute_inputs_and_output(
@@ -783,7 +791,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for BorrowckInferCtxt<'cx, 'tcx> {
_ => sym::anon,
};
- self.next_nll_region_var(origin, || RegionCtxt::Bound(BoundRegionInfo::Name(name)))
+ self.next_nll_region_var(origin, || RegionCtxt::Bound(name))
};
indices.insert_late_bound_region(liberated_region, region_vid.as_var());
@@ -813,9 +821,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for BorrowckInferCtxt<'cx, 'tcx> {
if !indices.indices.contains_key(&r) {
let region_vid = {
let name = r.get_name_or_anon();
- self.next_nll_region_var(FR, || {
- RegionCtxt::LateBound(BoundRegionInfo::Name(name))
- })
+ self.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
debug!(?region_vid);
@@ -835,9 +841,7 @@ impl<'cx, 'tcx> InferCtxtExt<'tcx> for BorrowckInferCtxt<'cx, 'tcx> {
if !indices.indices.contains_key(&r) {
let region_vid = {
let name = r.get_name_or_anon();
- self.next_nll_region_var(FR, || {
- RegionCtxt::LateBound(BoundRegionInfo::Name(name))
- })
+ self.next_nll_region_var(FR, || RegionCtxt::LateBound(name))
};
indices.insert_late_bound_region(r, region_vid.as_var());
diff --git a/compiler/rustc_builtin_macros/messages.ftl b/compiler/rustc_builtin_macros/messages.ftl
index 8d8db4c13..207ae8ad8 100644
--- a/compiler/rustc_builtin_macros/messages.ftl
+++ b/compiler/rustc_builtin_macros/messages.ftl
@@ -137,6 +137,8 @@ builtin_macros_format_positional_after_named = positional arguments cannot follo
.label = positional arguments must be before named arguments
.named_args = named argument
+builtin_macros_format_remove_raw_ident = remove the `r#`
+
builtin_macros_format_requires_string = requires at least a format string argument
builtin_macros_format_string_invalid = invalid format string: {$desc}
@@ -165,6 +167,8 @@ builtin_macros_format_unused_arg = {$named ->
builtin_macros_format_unused_args = multiple unused formatting arguments
.label = multiple missing formatting specifiers
+builtin_macros_format_use_positional = consider using a positional formatting argument instead
+
builtin_macros_global_asm_clobber_abi = `clobber_abi` cannot be used with `global_asm!`
builtin_macros_invalid_crate_attribute = invalid crate attribute
@@ -205,8 +209,6 @@ builtin_macros_requires_cfg_pattern =
builtin_macros_should_panic = functions using `#[should_panic]` must return `()`
-builtin_macros_sugg = consider using a positional formatting argument instead
-
builtin_macros_test_arg_non_lifetime = functions used as tests can not have any non-lifetime generic parameters
builtin_macros_test_args = functions used as tests can not have any arguments
diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs
index bda473120..0682d48ac 100644
--- a/compiler/rustc_builtin_macros/src/assert/context.rs
+++ b/compiler/rustc_builtin_macros/src/assert/context.rs
@@ -241,7 +241,7 @@ impl<'cx, 'a> Context<'cx, 'a> {
self.manage_cond_expr(prefix);
self.manage_cond_expr(suffix);
}
- ExprKind::Let(_, local_expr, _) => {
+ ExprKind::Let(_, local_expr, _, _) => {
self.manage_cond_expr(local_expr);
}
ExprKind::Match(local_expr, _) => {
diff --git a/compiler/rustc_builtin_macros/src/deriving/bounds.rs b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
index 2c8e6f99c..8027ca2e7 100644
--- a/compiler/rustc_builtin_macros/src/deriving/bounds.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
@@ -41,7 +41,7 @@ pub fn expand_deriving_const_param_ty(
path: path_std!(marker::ConstParamTy),
skip_path_as_bound: false,
needs_copy_as_bound_if_packed: false,
- additional_bounds: Vec::new(),
+ additional_bounds: vec![ty::Ty::Path(path_std!(cmp::Eq))],
supports_unions: false,
methods: Vec::new(),
associated_types: Vec::new(),
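The extra `cmp::Eq` entry in `additional_bounds` above means `#[derive(ConstParamTy)]` now also requires `Eq`. A nightly-only sketch of the intended usage, with example type names that are not taken from the patch:

```rust
#![feature(adt_const_params)] // incomplete feature, nightly only
#![allow(incomplete_features)]

use std::marker::ConstParamTy;

// With the additional bound above, the derive now also demands `Eq` here.
#[derive(PartialEq, Eq, ConstParamTy)]
struct Mode(u8);

fn run<const M: Mode>() {}

fn main() {
    run::<{ Mode(3) }>();
}
```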
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
index c78a0eb04..a000e4895 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
@@ -18,6 +18,20 @@ pub fn expand_deriving_eq(
is_const: bool,
) {
let span = cx.with_def_site_ctxt(span);
+
+ let structural_trait_def = TraitDef {
+ span,
+ path: path_std!(marker::StructuralEq),
+ skip_path_as_bound: true, // crucial!
+ needs_copy_as_bound_if_packed: false,
+ additional_bounds: Vec::new(),
+ supports_unions: true,
+ methods: Vec::new(),
+ associated_types: Vec::new(),
+ is_const: false,
+ };
+ structural_trait_def.expand(cx, mitem, item, push);
+
let trait_def = TraitDef {
span,
path: path_std!(cmp::Eq),
@@ -34,7 +48,7 @@ pub fn expand_deriving_eq(
attributes: thin_vec![
cx.attr_word(sym::inline, span),
cx.attr_nested_word(sym::doc, sym::hidden, span),
- cx.attr_word(sym::no_coverage, span)
+ cx.attr_nested_word(sym::coverage, sym::off, span)
],
fieldless_variants_strategy: FieldlessVariantsStrategy::Unify,
combine_substructure: combine_substructure(Box::new(|a, b, c| {
@@ -44,9 +58,6 @@ pub fn expand_deriving_eq(
associated_types: Vec::new(),
is_const,
};
-
- super::inject_impl_of_structural_trait(cx, span, item, path_std!(marker::StructuralEq), push);
-
trait_def.expand_ext(cx, mitem, item, push, true)
}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
index a71ecc5db..a170468b4 100644
--- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
@@ -72,13 +72,20 @@ pub fn expand_deriving_partial_eq(
BlockOrExpr::new_expr(expr)
}
- super::inject_impl_of_structural_trait(
- cx,
+ let structural_trait_def = TraitDef {
span,
- item,
- path_std!(marker::StructuralPartialEq),
- push,
- );
+ path: path_std!(marker::StructuralPartialEq),
+ skip_path_as_bound: true, // crucial!
+ needs_copy_as_bound_if_packed: false,
+ additional_bounds: Vec::new(),
+ // We really don't support unions, but that's already checked by the impl generated below;
+ // a second check here would lead to redundant error messages.
+ supports_unions: true,
+ methods: Vec::new(),
+ associated_types: Vec::new(),
+ is_const: false,
+ };
+ structural_trait_def.expand(cx, mitem, item, push);
// No need to generate `ne`, the default suffices, and not generating it is
// faster.
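The two derives above now emit the `StructuralEq` / `StructuralPartialEq` marker impls through a dedicated `TraitDef` (with `skip_path_as_bound` set) instead of the removed `inject_impl_of_structural_trait` helper; the user-visible contract is unchanged. As a stable-Rust reminder of what those marker impls enable, not part of the patch:

```rust
#[derive(PartialEq, Eq)]
struct Point { x: i32, y: i32 }

const ORIGIN: Point = Point { x: 0, y: 0 };

fn is_origin(p: Point) -> bool {
    // Using `ORIGIN` as a pattern relies on the structural-equality marker impls
    // that `#[derive(PartialEq, Eq)]` generates alongside `PartialEq`/`Eq`.
    matches!(p, ORIGIN)
}

fn main() {
    assert!(is_origin(Point { x: 0, y: 0 }));
    assert!(!is_origin(Point { x: 1, y: 0 }));
}
```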
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 6597ee3cf..7252658d4 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -88,7 +88,7 @@
//!
//! When generating the `expr` for the `A` impl, the `SubstructureFields` is
//!
-//! ```{.text}
+//! ```text
//! Struct(vec![FieldInfo {
//! span: <span of x>
//! name: Some(<ident of x>),
@@ -99,7 +99,7 @@
//!
//! For the `B` impl, called with `B(a)` and `B(b)`,
//!
-//! ```{.text}
+//! ```text
//! Struct(vec![FieldInfo {
//! span: <span of `i32`>,
//! name: None,
@@ -113,7 +113,7 @@
//! When generating the `expr` for a call with `self == C0(a)` and `other
//! == C0(b)`, the SubstructureFields is
//!
-//! ```{.text}
+//! ```text
//! EnumMatching(0, <ast::Variant for C0>,
//! vec![FieldInfo {
//! span: <span of i32>
@@ -125,7 +125,7 @@
//!
//! For `C1 {x}` and `C1 {x}`,
//!
-//! ```{.text}
+//! ```text
//! EnumMatching(1, <ast::Variant for C1>,
//! vec![FieldInfo {
//! span: <span of x>
@@ -137,7 +137,7 @@
//!
//! For the tags,
//!
-//! ```{.text}
+//! ```text
//! EnumTag(
//! &[<ident of self tag>, <ident of other tag>], <expr to combine with>)
//! ```
@@ -149,7 +149,7 @@
//!
//! A static method on the types above would result in,
//!
-//! ```{.text}
+//! ```text
//! StaticStruct(<ast::VariantData of A>, Named(vec![(<ident of x>, <span of x>)]))
//!
//! StaticStruct(<ast::VariantData of B>, Unnamed(vec![<span of x>]))
@@ -711,7 +711,9 @@ impl<'a> TraitDef<'a> {
.collect();
// Require the current trait.
- bounds.push(cx.trait_bound(trait_path.clone(), self.is_const));
+ if !self.skip_path_as_bound {
+ bounds.push(cx.trait_bound(trait_path.clone(), self.is_const));
+ }
// Add a `Copy` bound if required.
if is_packed && self.needs_copy_as_bound_if_packed {
@@ -722,15 +724,17 @@ impl<'a> TraitDef<'a> {
));
}
- let predicate = ast::WhereBoundPredicate {
- span: self.span,
- bound_generic_params: field_ty_param.bound_generic_params,
- bounded_ty: field_ty_param.ty,
- bounds,
- };
+ if !bounds.is_empty() {
+ let predicate = ast::WhereBoundPredicate {
+ span: self.span,
+ bound_generic_params: field_ty_param.bound_generic_params,
+ bounded_ty: field_ty_param.ty,
+ bounds,
+ };
- let predicate = ast::WherePredicate::BoundPredicate(predicate);
- where_clause.predicates.push(predicate);
+ let predicate = ast::WherePredicate::BoundPredicate(predicate);
+ where_clause.predicates.push(predicate);
+ }
}
}
}
diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs
index d34336e76..a6f3252e7 100644
--- a/compiler/rustc_builtin_macros/src/deriving/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs
@@ -2,9 +2,9 @@
use rustc_ast as ast;
use rustc_ast::ptr::P;
-use rustc_ast::{GenericArg, Impl, ItemKind, MetaItem};
+use rustc_ast::{GenericArg, MetaItem};
use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier};
-use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
use thin_vec::{thin_vec, ThinVec};
@@ -116,100 +116,6 @@ fn call_unreachable(cx: &ExtCtxt<'_>, span: Span) -> P<ast::Expr> {
}))
}
-// Injects `impl<...> Structural for ItemType<...> { }`. In particular,
-// does *not* add `where T: Structural` for parameters `T` in `...`.
-// (That's the main reason we cannot use TraitDef here.)
-fn inject_impl_of_structural_trait(
- cx: &mut ExtCtxt<'_>,
- span: Span,
- item: &Annotatable,
- structural_path: generic::ty::Path,
- push: &mut dyn FnMut(Annotatable),
-) {
- let Annotatable::Item(item) = item else {
- unreachable!();
- };
-
- let generics = match &item.kind {
- ItemKind::Struct(_, generics) | ItemKind::Enum(_, generics) => generics,
- // Do not inject `impl Structural for Union`. (`PartialEq` does not
- // support unions, so we will see error downstream.)
- ItemKind::Union(..) => return,
- _ => unreachable!(),
- };
-
- // Create generics param list for where clauses and impl headers
- let mut generics = generics.clone();
-
- let ctxt = span.ctxt();
-
- // Create the type of `self`.
- //
- // in addition, remove defaults from generic params (impls cannot have them).
- let self_params: Vec<_> = generics
- .params
- .iter_mut()
- .map(|param| match &mut param.kind {
- ast::GenericParamKind::Lifetime => ast::GenericArg::Lifetime(
- cx.lifetime(param.ident.span.with_ctxt(ctxt), param.ident),
- ),
- ast::GenericParamKind::Type { default } => {
- *default = None;
- ast::GenericArg::Type(cx.ty_ident(param.ident.span.with_ctxt(ctxt), param.ident))
- }
- ast::GenericParamKind::Const { ty: _, kw_span: _, default } => {
- *default = None;
- ast::GenericArg::Const(
- cx.const_ident(param.ident.span.with_ctxt(ctxt), param.ident),
- )
- }
- })
- .collect();
-
- let type_ident = item.ident;
-
- let trait_ref = cx.trait_ref(structural_path.to_path(cx, span, type_ident, &generics));
- let self_type = cx.ty_path(cx.path_all(span, false, vec![type_ident], self_params));
-
- // It would be nice to also encode constraint `where Self: Eq` (by adding it
- // onto `generics` cloned above). Unfortunately, that strategy runs afoul of
- // rust-lang/rust#48214. So we perform that additional check in the compiler
- // itself, instead of encoding it here.
-
- // Keep the lint and stability attributes of the original item, to control
- // how the generated implementation is linted.
- let mut attrs = ast::AttrVec::new();
- attrs.extend(
- item.attrs
- .iter()
- .filter(|a| {
- [sym::allow, sym::warn, sym::deny, sym::forbid, sym::stable, sym::unstable]
- .contains(&a.name_or_empty())
- })
- .cloned(),
- );
- // Mark as `automatically_derived` to avoid some silly lints.
- attrs.push(cx.attr_word(sym::automatically_derived, span));
-
- let newitem = cx.item(
- span,
- Ident::empty(),
- attrs,
- ItemKind::Impl(Box::new(Impl {
- unsafety: ast::Unsafe::No,
- polarity: ast::ImplPolarity::Positive,
- defaultness: ast::Defaultness::Final,
- constness: ast::Const::No,
- generics,
- of_trait: Some(trait_ref),
- self_ty: self_type,
- items: ThinVec::new(),
- })),
- );
-
- push(Annotatable::Item(newitem));
-}
-
fn assert_ty_bounds(
cx: &mut ExtCtxt<'_>,
stmts: &mut ThinVec<ast::Stmt>,
diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs
index fbf0395bb..1238773d5 100644
--- a/compiler/rustc_builtin_macros/src/errors.rs
+++ b/compiler/rustc_builtin_macros/src/errors.rs
@@ -539,18 +539,29 @@ pub(crate) struct InvalidFormatStringLabel {
}
#[derive(Subdiagnostic)]
-#[multipart_suggestion(
- builtin_macros_sugg,
- style = "verbose",
- applicability = "machine-applicable"
-)]
-pub(crate) struct InvalidFormatStringSuggestion {
- #[suggestion_part(code = "{len}")]
- pub(crate) captured: Span,
- pub(crate) len: String,
- #[suggestion_part(code = ", {arg}")]
- pub(crate) span: Span,
- pub(crate) arg: String,
+pub(crate) enum InvalidFormatStringSuggestion {
+ #[multipart_suggestion(
+ builtin_macros_format_use_positional,
+ style = "verbose",
+ applicability = "machine-applicable"
+ )]
+ UsePositional {
+ #[suggestion_part(code = "{len}")]
+ captured: Span,
+ len: String,
+ #[suggestion_part(code = ", {arg}")]
+ span: Span,
+ arg: String,
+ },
+ #[suggestion(
+ builtin_macros_format_remove_raw_ident,
+ code = "",
+ applicability = "machine-applicable"
+ )]
+ RemoveRawIdent {
+ #[primary_span]
+ span: Span,
+ },
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
index ede95dbf8..8397b5e42 100644
--- a/compiler/rustc_builtin_macros/src/format.rs
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -260,20 +260,29 @@ fn make_format_args(
if let Some((label, span)) = err.secondary_label && is_source_literal {
e.label_ = Some(errors::InvalidFormatStringLabel { span: fmt_span.from_inner(InnerSpan::new(span.start, span.end)), label } );
}
- if err.should_be_replaced_with_positional_argument {
- let captured_arg_span =
- fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
- if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
- let span = match args.unnamed_args().last() {
- Some(arg) => arg.expr.span,
- None => fmt_span,
- };
- e.sugg_ = Some(errors::InvalidFormatStringSuggestion {
- captured: captured_arg_span,
- len: args.unnamed_args().len().to_string(),
- span: span.shrink_to_hi(),
- arg,
- });
+ match err.suggestion {
+ parse::Suggestion::None => {}
+ parse::Suggestion::UsePositional => {
+ let captured_arg_span =
+ fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
+ if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
+ let span = match args.unnamed_args().last() {
+ Some(arg) => arg.expr.span,
+ None => fmt_span,
+ };
+ e.sugg_ = Some(errors::InvalidFormatStringSuggestion::UsePositional {
+ captured: captured_arg_span,
+ len: args.unnamed_args().len().to_string(),
+ span: span.shrink_to_hi(),
+ arg,
+ });
+ }
+ }
+ parse::Suggestion::RemoveRawIdent(span) => {
+ if is_source_literal {
+ let span = fmt_span.from_inner(InnerSpan::new(span.start, span.end));
+ e.sugg_ = Some(errors::InvalidFormatStringSuggestion::RemoveRawIdent { span })
+ }
}
}
ecx.emit_err(e);
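The new `RemoveRawIdent` arm above handles raw identifiers captured implicitly in a format string, which the format-string grammar does not accept. A small illustration of the diagnostic it drives, with the error text paraphrased rather than copied from the patch:

```rust
fn main() {
    let r#x = 42;
    // The next line is rejected: raw identifiers are not supported in format
    // strings, and the new suggestion is to remove the `r#` prefix.
    // println!("{r#x}");
    println!("{}", r#x); // passing the value as an explicit argument works fine
}
```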
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
index 433da7423..953d957a4 100644
--- a/compiler/rustc_builtin_macros/src/source_util.rs
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -217,7 +217,7 @@ pub fn expand_include_bytes(
};
match cx.source_map().load_binary_file(&file) {
Ok(bytes) => {
- let expr = cx.expr(sp, ast::ExprKind::IncludedBytes(bytes.into()));
+ let expr = cx.expr(sp, ast::ExprKind::IncludedBytes(bytes));
base::MacEager::expr(expr)
}
Err(e) => {
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
index d8846a9f0..53ff089d7 100644
--- a/compiler/rustc_builtin_macros/src/test_harness.rs
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -254,7 +254,7 @@ fn generate_test_harness(
let expn_id = ext_cx.resolver.expansion_for_ast_pass(
DUMMY_SP,
AstPass::TestHarness,
- &[sym::test, sym::rustc_attrs, sym::no_coverage],
+ &[sym::test, sym::rustc_attrs, sym::coverage_attribute],
None,
);
let def_site = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
@@ -335,8 +335,8 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
// #[rustc_main]
let main_attr = ecx.attr_word(sym::rustc_main, sp);
- // #[no_coverage]
- let no_coverage_attr = ecx.attr_word(sym::no_coverage, sp);
+ // #[coverage(off)]
+ let coverage_attr = ecx.attr_nested_word(sym::coverage, sym::off, sp);
// pub fn main() { ... }
let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(ThinVec::new()));
@@ -366,7 +366,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
let main = P(ast::Item {
ident: main_id,
- attrs: thin_vec![main_attr, no_coverage_attr],
+ attrs: thin_vec![main_attr, coverage_attr],
id: ast::DUMMY_NODE_ID,
kind: main,
vis: ast::Visibility { span: sp, kind: ast::VisibilityKind::Public, tokens: None },
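The test-harness hunks above follow the rename of `#[no_coverage]` to `#[coverage(off)]`, gated by the `coverage_attribute` feature. A nightly-only sketch of the attribute as user code would spell it, not taken from the patch:

```rust
#![feature(coverage_attribute)] // nightly only

// Excluded from instrumentation-based code coverage, like the generated
// `#[rustc_main]` entry point in the test harness above.
#[coverage(off)]
fn helper() -> u32 {
    41 + 1
}

fn main() {
    assert_eq!(helper(), 42);
}
```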
diff --git a/compiler/rustc_codegen_cranelift/docs/usage.md b/compiler/rustc_codegen_cranelift/docs/usage.md
index c6210f958..135a51ce3 100644
--- a/compiler/rustc_codegen_cranelift/docs/usage.md
+++ b/compiler/rustc_codegen_cranelift/docs/usage.md
@@ -54,7 +54,7 @@ These are a few functions that allow you to easily run rust code from the shell
```bash
function jit_naked() {
- echo "$@" | $cg_clif_dir/dist/rustc-clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
+ echo "$@" | $cg_clif_dir/dist/rustc-clif - -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic
}
function jit() {
diff --git a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
index fa175edca..5b79d6569 100644
--- a/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
+++ b/compiler/rustc_codegen_cranelift/patches/stdlib-lock.toml
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
-version = "0.20.0"
+version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"compiler_builtins",
"gimli",
@@ -140,9 +140,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.27.2"
+version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
+checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
dependencies = [
"compiler_builtins",
"rustc-std-workspace-alloc",
@@ -205,9 +205,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.31.1"
+version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
+checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
dependencies = [
"compiler_builtins",
"memchr",
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
index 5689bdee6..2cc5d7777 100644
--- a/compiler/rustc_codegen_cranelift/rust-toolchain
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -1,3 +1,3 @@
[toolchain]
-channel = "nightly-2023-08-08"
+channel = "nightly-2023-09-06"
components = ["rust-src", "rustc-dev", "llvm-tools"]
diff --git a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
index f782671fe..03912b18e 100755
--- a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
+++ b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
@@ -100,9 +100,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
stack = &stack[..index + ENCODE_METADATA.len()];
}
- const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
- if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
- stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+ const INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::instantiate_and_normalize_erasing_regions";
+ if let Some(index) = stack.find(INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS) {
+ stack = &stack[..index + INSTANTIATE_AND_NORMALIZE_ERASING_REGIONS.len()];
}
const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
index c163b8543..3fc462a39 100755
--- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -45,6 +45,7 @@ rm tests/ui/proc-macro/quote-debug.rs
rm tests/ui/proc-macro/no-missing-docs.rs
rm tests/ui/rust-2018/proc-macro-crate-in-paths.rs
rm tests/ui/proc-macro/allowed-signatures.rs
+rm tests/ui/proc-macro/no-mangle-in-proc-macro-issue-111888.rs
# vendor intrinsics
rm tests/ui/sse2.rs # cpuid not supported, so sse2 not detected
@@ -114,6 +115,7 @@ rm tests/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
rm tests/ui/mir/mir_raw_fat_ptr.rs # same
rm tests/ui/consts/issue-33537.rs # same
rm tests/ui/layout/valid_range_oob.rs # different ICE message
+rm tests/ui/const-generics/generic_const_exprs/issue-80742.rs # gives error instead of ICE with cg_clif
rm tests/ui/consts/issue-miri-1910.rs # different error message
rm tests/ui/consts/offset_ub.rs # same
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index b7f56a298..5d775b9b5 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -39,7 +39,7 @@ fn clif_sig_from_fn_abi<'tcx>(
pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: CallConv) -> CallConv {
match c {
Conv::Rust | Conv::C => default_call_conv,
- Conv::RustCold => CallConv::Cold,
+ Conv::Cold | Conv::PreserveMost | Conv::PreserveAll => CallConv::Cold,
Conv::X86_64SysV => CallConv::SystemV,
Conv::X86_64Win64 => CallConv::WindowsFastcall,
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
index d847e524f..0d16da480 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -100,11 +100,11 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Cast(ref cast, pad_i32) => {
+ PassMode::Cast { ref cast, pad_i32 } => {
assert!(!pad_i32, "padding support not yet implemented");
cast_target_to_abi_params(cast)
}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack } => {
if on_stack {
// Abi requires aligning struct size to pointer size
let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
@@ -117,11 +117,11 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
}
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
assert!(!on_stack);
smallvec![
apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
- apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), meta_attrs),
]
}
}
@@ -148,14 +148,14 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
}
_ => unreachable!("{:?}", self.layout.abi),
},
- PassMode::Cast(ref cast, _) => {
+ PassMode::Cast { ref cast, .. } => {
(None, cast_target_to_abi_params(cast).into_iter().collect())
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack } => {
assert!(!on_stack);
(Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
}
@@ -229,7 +229,7 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
let (a, b) = arg.load_scalar_pair(fx);
smallvec![a, b]
}
- PassMode::Cast(ref cast, _) => to_casted_value(fx, arg, cast),
+ PassMode::Cast { ref cast, .. } => to_casted_value(fx, arg, cast),
PassMode::Indirect { .. } => {
if is_owned {
match arg.force_stack(fx) {
@@ -287,14 +287,14 @@ pub(super) fn cvalue_for_param<'tcx>(
assert_eq!(block_params.len(), 2, "{:?}", block_params);
Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
}
- PassMode::Cast(ref cast, _) => {
+ PassMode::Cast { ref cast, .. } => {
Some(from_casted_value(fx, &block_params, arg_abi.layout, cast))
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
assert_eq!(block_params.len(), 1, "{:?}", block_params);
Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
assert_eq!(block_params.len(), 2, "{:?}", block_params);
Some(CValue::by_ref_unsized(
Pointer::new(block_params[0]),
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
index 14e54d5ee..646fb4a3c 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/returning.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -13,7 +13,7 @@ pub(super) fn codegen_return_param<'tcx>(
block_params_iter: &mut impl Iterator<Item = Value>,
) -> CPlace<'tcx> {
let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast { .. } => {
let is_ssa =
ssa_analyzed[RETURN_PLACE].is_ssa(fx, fx.fn_abi.as_ref().unwrap().ret.layout.ty);
(
@@ -26,7 +26,7 @@ pub(super) fn codegen_return_param<'tcx>(
smallvec![],
)
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
let ret_param = block_params_iter.next().unwrap();
assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
(
@@ -34,7 +34,7 @@ pub(super) fn codegen_return_param<'tcx>(
smallvec![ret_param],
)
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
};
@@ -62,7 +62,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
) {
let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
PassMode::Ignore => (None, None),
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
if let Some(ret_ptr) = ret_place.try_to_ptr() {
// This is an optimization to prevent unnecessary copies of the return value when
// the return place is already a memory place as opposed to a register.
@@ -73,10 +73,10 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
(Some(place), Some(place.to_ptr().get_addr(fx)))
}
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
- PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => (None, None),
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast { .. } => (None, None),
};
let call_inst = f(fx, return_ptr);
@@ -93,21 +93,21 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
ret_place
.write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
}
- PassMode::Cast(ref cast, _) => {
+ PassMode::Cast { ref cast, .. } => {
let results =
fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
let result =
super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
ret_place.write_cvalue(fx, result);
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
if let Some(ret_temp_place) = ret_temp_place {
// If ret_temp_place is None, it is not necessary to copy the return value.
let ret_temp_value = ret_temp_place.to_cvalue(fx);
ret_place.write_cvalue(fx, ret_temp_value);
}
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
}
@@ -116,10 +116,10 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
/// Codegen a return instruction with the right return value(s) if any.
pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
match fx.fn_abi.as_ref().unwrap().ret.mode {
- PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ PassMode::Ignore | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
fx.bcx.ins().return_(&[]);
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
unreachable!("unsized return value")
}
PassMode::Direct(_) => {
@@ -132,7 +132,7 @@ pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
}
- PassMode::Cast(ref cast, _) => {
+ PassMode::Cast { ref cast, .. } => {
let place = fx.get_local_place(RETURN_PLACE);
let ret_val = place.to_cvalue(fx);
let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 522dd7189..0a451dad9 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -250,7 +250,10 @@ pub(crate) fn verify_func(
}
fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
- if !crate::constant::check_constants(fx) {
+ if let Err(err) =
+ fx.mir.post_mono_checks(fx.tcx, ty::ParamEnv::reveal_all(), |c| Ok(fx.monomorphize(c)))
+ {
+ err.emit_err(fx.tcx);
fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
// compilation should have been aborted
@@ -474,10 +477,10 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
*destination,
);
}
- TerminatorKind::Terminate => {
- codegen_panic_cannot_unwind(fx, source_info);
+ TerminatorKind::UnwindTerminate(reason) => {
+ codegen_unwind_terminate(fx, source_info, *reason);
}
- TerminatorKind::Resume => {
+ TerminatorKind::UnwindResume => {
// FIXME implement unwinding
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
@@ -723,11 +726,8 @@ fn codegen_stmt<'tcx>(
}
Rvalue::Repeat(ref operand, times) => {
let operand = codegen_operand(fx, operand);
- let times = fx
- .monomorphize(times)
- .eval(fx.tcx, ParamEnv::reveal_all())
- .try_to_bits(fx.tcx.data_layout.pointer_size)
- .unwrap();
+ let times =
+ fx.monomorphize(times).eval_target_usize(fx.tcx, ParamEnv::reveal_all());
if operand.layout().size.bytes() == 0 {
// Do nothing for ZST's
} else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
@@ -875,7 +875,8 @@ pub(crate) fn codegen_place<'tcx>(
PlaceElem::Deref => {
cplace = cplace.place_deref(fx);
}
- PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty),
+ PlaceElem::OpaqueCast(ty) => bug!("encountered OpaqueCast({ty}) in codegen"),
+ PlaceElem::Subtype(ty) => cplace = cplace.place_transmute_type(fx, fx.monomorphize(ty)),
PlaceElem::Field(field, _ty) => {
cplace = cplace.place_field(fx, field);
}
@@ -971,13 +972,14 @@ pub(crate) fn codegen_panic_nounwind<'tcx>(
codegen_panic_inner(fx, rustc_hir::LangItem::PanicNounwind, &args, source_info.span);
}
-pub(crate) fn codegen_panic_cannot_unwind<'tcx>(
+pub(crate) fn codegen_unwind_terminate<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
source_info: mir::SourceInfo,
+ reason: UnwindTerminateReason,
) {
let args = [];
- codegen_panic_inner(fx, rustc_hir::LangItem::PanicCannotUnwind, &args, source_info.span);
+ codegen_panic_inner(fx, reason.lang_item(), &args, source_info.span);
}
fn codegen_panic_inner<'tcx>(
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 3081dcfa2..359b430b4 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -359,7 +359,7 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>> + Copy,
{
- self.instance.subst_mir_and_normalize_erasing_regions(
+ self.instance.instantiate_mir_and_normalize_erasing_regions(
self.tcx,
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(value),
@@ -480,7 +480,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.0.sess.span_fatal(span, err.to_string())
} else {
- span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ self.0.sess.span_fatal(span, format!("failed to get layout for `{}`: {}", ty, err))
}
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index c31535742..14b10ed8b 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -2,9 +2,8 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::interpret::{
- read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
-};
+use rustc_middle::mir::interpret::{read_target_uint, AllocId, GlobalAlloc, Scalar};
+use rustc_middle::mir::ConstValue;
use cranelift_module::*;
@@ -33,16 +32,6 @@ impl ConstantCx {
}
}
-pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
- let mut all_constants_ok = true;
- for constant in &fx.mir.required_consts {
- if eval_mir_constant(fx, constant).is_none() {
- all_constants_ok = false;
- }
- }
- all_constants_ok
-}
-
pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
let mut constants_cx = ConstantCx::new();
constants_cx.todo.push(TodoItem::Static(def_id));
@@ -75,53 +64,21 @@ pub(crate) fn codegen_tls_ref<'tcx>(
pub(crate) fn eval_mir_constant<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
- constant: &Constant<'tcx>,
-) -> Option<(ConstValue<'tcx>, Ty<'tcx>)> {
- let constant_kind = fx.monomorphize(constant.literal);
- let uv = match constant_kind {
- ConstantKind::Ty(const_) => match const_.kind() {
- ty::ConstKind::Unevaluated(uv) => uv.expand(),
- ty::ConstKind::Value(val) => {
- return Some((fx.tcx.valtree_to_const_val((const_.ty(), val)), const_.ty()));
- }
- err => span_bug!(
- constant.span,
- "encountered bad ConstKind after monomorphizing: {:?}",
- err
- ),
- },
- ConstantKind::Unevaluated(mir::UnevaluatedConst { def, .. }, _)
- if fx.tcx.is_static(def) =>
- {
- span_bug!(constant.span, "MIR constant refers to static");
- }
- ConstantKind::Unevaluated(uv, _) => uv,
- ConstantKind::Val(val, _) => return Some((val, constant_kind.ty())),
- };
-
- let val = fx
- .tcx
- .const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None)
- .map_err(|err| match err {
- ErrorHandled::Reported(_) => {
- fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
- }
- ErrorHandled::TooGeneric => {
- span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
- }
- })
- .ok();
- val.map(|val| (val, constant_kind.ty()))
+ constant: &ConstOperand<'tcx>,
+) -> (ConstValue<'tcx>, Ty<'tcx>) {
+ let cv = fx.monomorphize(constant.const_);
+ // This cannot fail because we checked all required_consts in advance.
+ let val = cv
+ .eval(fx.tcx, ty::ParamEnv::reveal_all(), Some(constant.span))
+ .expect("erroneous constant not captured by required_consts");
+ (val, cv.ty())
}
pub(crate) fn codegen_constant_operand<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
- constant: &Constant<'tcx>,
+ constant: &ConstOperand<'tcx>,
) -> CValue<'tcx> {
- let (const_val, ty) = eval_mir_constant(fx, constant).unwrap_or_else(|| {
- span_bug!(constant.span, "erroneous constant not captured by required_consts")
- });
-
+ let (const_val, ty) = eval_mir_constant(fx, constant);
codegen_const_value(fx, const_val, ty)
}
@@ -138,7 +95,7 @@ pub(crate) fn codegen_const_value<'tcx>(
}
match const_val {
- ConstValue::ZeroSized => unreachable!(), // we already handles ZST above
+ ConstValue::ZeroSized => unreachable!(), // we already handled ZST above
ConstValue::Scalar(x) => match x {
Scalar::Int(int) => {
if fx.clif_type(layout.ty).is_some() {
@@ -222,19 +179,16 @@ pub(crate) fn codegen_const_value<'tcx>(
CValue::by_val(val, layout)
}
},
- ConstValue::ByRef { alloc, offset } => CValue::by_ref(
- pointer_for_allocation(fx, alloc)
+ ConstValue::Indirect { alloc_id, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc_id)
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
layout,
),
- ConstValue::Slice { data, start, end } => {
- let ptr = pointer_for_allocation(fx, data)
- .offset_i64(fx, i64::try_from(start).unwrap())
- .get_addr(fx);
- let len = fx
- .bcx
- .ins()
- .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ ConstValue::Slice { data, meta } => {
+ let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
+ let ptr = pointer_for_allocation(fx, alloc_id).get_addr(fx);
+ // FIXME: the `try_from` here can actually fail, e.g. for very long ZST slices.
+ let len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(meta).unwrap());
CValue::by_val_pair(ptr, len, layout)
}
}
@@ -242,9 +196,9 @@ pub(crate) fn codegen_const_value<'tcx>(
fn pointer_for_allocation<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
- alloc: ConstAllocation<'tcx>,
+ alloc_id: AllocId,
) -> crate::pointer::Pointer {
- let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
&mut *fx.module,
@@ -375,6 +329,7 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
unreachable!()
}
};
+ // FIXME: should we have a cache so we don't do this multiple times for the same `ConstAllocation`?
let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
module.declare_anonymous_data(alloc.inner().mutability.is_mut(), false).unwrap()
});
@@ -479,7 +434,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
operand: &Operand<'tcx>,
) -> Option<ConstValue<'tcx>> {
match operand {
- Operand::Constant(const_) => Some(eval_mir_constant(fx, const_).unwrap().0),
+ Operand::Constant(const_) => Some(eval_mir_constant(fx, const_).0),
// FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
// inside a temporary before being passed to the intrinsic requiring the const argument.
// This code tries to find a single constant-defining definition of the referenced local.
@@ -550,8 +505,8 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
match &bb_data.terminator().kind {
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index 50bc7a127..b19b935a0 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -81,13 +81,10 @@ impl DebugContext {
match tcx.sess.source_map().lookup_line(span.lo()) {
Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.lines(|lines| lines[line]);
+ let line_pos = file.lines()[line];
+ let col = file.relative_position(span.lo()) - line_pos;
- (
- file,
- u64::try_from(line).unwrap() + 1,
- u64::from((span.lo() - line_pos).to_u32()) + 1,
- )
+ (file, u64::try_from(line).unwrap() + 1, u64::from(col.to_u32()) + 1)
}
Err(file) => (file, 0, 0),
}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index d143bcc96..3e9383095 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -269,7 +269,7 @@ fn module_codegen(
),
) -> OngoingModuleCodegen {
let (cgu_name, mut cx, mut module, codegened_functions) =
- tcx.prof.verbose_generic_activity_with_arg("codegen cgu", cgu_name.as_str()).run(|| {
+ tcx.prof.generic_activity_with_arg("codegen cgu", cgu_name.as_str()).run(|| {
let cgu = tcx.codegen_unit(cgu_name);
let mono_items = cgu.items_in_deterministic_order(tcx);
@@ -322,35 +322,24 @@ fn module_codegen(
});
OngoingModuleCodegen::Async(std::thread::spawn(move || {
- cx.profiler.clone().verbose_generic_activity_with_arg("compile functions", &*cgu_name).run(
- || {
- cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
- cx.profiler.clone(),
- )));
-
- let mut cached_context = Context::new();
- for codegened_func in codegened_functions {
- crate::base::compile_fn(
- &mut cx,
- &mut cached_context,
- &mut module,
- codegened_func,
- );
- }
- },
- );
+ cx.profiler.clone().generic_activity_with_arg("compile functions", &*cgu_name).run(|| {
+ cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
+ cx.profiler.clone(),
+ )));
+
+ let mut cached_context = Context::new();
+ for codegened_func in codegened_functions {
+ crate::base::compile_fn(&mut cx, &mut cached_context, &mut module, codegened_func);
+ }
+ });
- let global_asm_object_file = cx
- .profiler
- .verbose_generic_activity_with_arg("compile assembly", &*cgu_name)
- .run(|| {
+ let global_asm_object_file =
+ cx.profiler.generic_activity_with_arg("compile assembly", &*cgu_name).run(|| {
crate::global_asm::compile_global_asm(&global_asm_config, &cgu_name, &cx.global_asm)
})?;
- let codegen_result = cx
- .profiler
- .verbose_generic_activity_with_arg("write object file", &*cgu_name)
- .run(|| {
+ let codegen_result =
+ cx.profiler.generic_activity_with_arg("write object file", &*cgu_name).run(|| {
emit_cgu(
&global_asm_config.output_filenames,
&cx.profiler,
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 518e3da07..50bbf8105 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -242,8 +242,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
}
InlineAsmOperand::Const { ref value } => {
- let (const_value, ty) = crate::constant::eval_mir_constant(fx, value)
- .unwrap_or_else(|| span_bug!(span, "asm const cannot be resolved"));
+ let (const_value, ty) = crate::constant::eval_mir_constant(fx, value);
let value = rustc_codegen_ssa::common::asm_const_to_str(
fx.tcx,
span,
@@ -253,8 +252,8 @@ pub(crate) fn codegen_inline_asm<'tcx>(
CInlineAsmOperand::Const { value }
}
InlineAsmOperand::SymFn { ref value } => {
- let literal = fx.monomorphize(value.literal);
- if let ty::FnDef(def_id, args) = *literal.ty().kind() {
+ let const_ = fx.monomorphize(value.const_);
+ if let ty::FnDef(def_id, args) = *const_.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
fx.tcx,
ty::ParamEnv::reveal_all(),
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index fdd27a454..e62de6b61 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -177,244 +177,6 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
});
}
- "llvm.x86.sse2.psrli.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.psrli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.psrai.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.psrai.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.pslli.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.pslli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.psrli.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.psrli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.psrai.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.psrai.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.pslli.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.sse2.pslli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx.psrli.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.psrli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx.psrai.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.psrai.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.psrli.q" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.psrli.q imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.sse2.pslli.q" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.pslli.q imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx.pslli.d" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.pslli.d imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx2.psrli.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.psrli.w imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx2.psrai.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.psrai.w imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
- "llvm.x86.avx2.pslli.w" => {
- let (a, imm8) = match args {
- [a, imm8] => (a, imm8),
- _ => bug!("wrong number of args for intrinsic {intrinsic}"),
- };
- let a = codegen_operand(fx, a);
- let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
- .expect("llvm.x86.avx.pslli.w imm8 not const");
-
- simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
- .try_to_bits(Size::from_bytes(4))
- .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
- {
- imm8 if imm8 < 16 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
- _ => fx.bcx.ins().iconst(types::I32, 0),
- });
- }
"llvm.x86.ssse3.pshuf.b.128" | "llvm.x86.avx2.pshuf.b" => {
let (a, b) = match args {
[a, b] => (a, b),
@@ -506,14 +268,6 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
}
- "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
- intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
- let mem_addr = mem_addr.load_scalar(fx);
-
- // FIXME correctly handle the unalignment
- let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
- dest.write_cvalue(fx, a);
- }
"llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
let a = match args {
[a] => a,
@@ -571,8 +325,6 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
// llvm.x86.avx2.vperm2i128
// llvm.x86.ssse3.pshuf.b.128
// llvm.x86.avx2.pshuf.b
-// llvm.x86.avx2.psrli.w
-// llvm.x86.sse2.psrli.w
fn llvm_add_sub<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 9863e40b5..6efbe1498 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -21,7 +21,7 @@ fn report_simd_type_validation_error(
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: Symbol,
- _args: GenericArgsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: BasicBlock,
@@ -117,6 +117,54 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
+ // simd_shuffle_generic<T, U, const I: &[u32]>(x: T, y: T) -> U
+ sym::simd_shuffle_generic => {
+ let [x, y] = args else {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ let idx = generic_args[2]
+ .expect_const()
+ .eval(fx.tcx, ty::ParamEnv::reveal_all(), Some(span))
+ .unwrap()
+ .unwrap_branch();
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(idx.len() as u64, ret_lane_count);
+
+ let total_len = lane_count * 2;
+
+ let indexes =
+ idx.iter().map(|idx| idx.unwrap_leaf().try_to_u16().unwrap()).collect::<Vec<u16>>();
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ }
+
// simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U
sym::simd_shuffle => {
let (x, y, idx) = match args {
@@ -172,7 +220,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
.expect("simd_shuffle idx not const");
let idx_bytes = match idx_const {
- ConstValue::ByRef { alloc, offset } => {
+ ConstValue::Indirect { alloc_id, offset } => {
+ let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
let size = Size::from_bytes(
4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
);
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index 6aeba13f6..c6133f2b3 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -88,7 +88,8 @@ fn unsize_ptr<'tcx>(
let src_f = src_layout.field(fx, i);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
- if src_f.is_zst() {
+ if src_f.is_1zst() {
+ // We are looking for the one non-1-ZST field; this is not it.
continue;
}
assert_eq!(src_layout.size, src_f.size);
@@ -151,6 +152,7 @@ pub(crate) fn coerce_unsized_into<'tcx>(
let dst_f = dst.place_field(fx, FieldIdx::new(i));
if dst_f.layout().is_zst() {
+ // No data here, nothing to copy/coerce.
continue;
}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index ff95141ce..45893a4f3 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -674,7 +674,9 @@ impl<'tcx> CPlace<'tcx> {
}
}
- pub(crate) fn place_opaque_cast(
+ /// Used for `ProjectionElem::Subtype`; `ty` has to be monomorphized before
+ /// being passed on.
+ pub(crate) fn place_transmute_type(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
ty: Ty<'tcx>,
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index b309695c1..41ea0b122 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -48,19 +48,12 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
) -> (Pointer, Value) {
let (ptr, vtable) = 'block: {
if let Abi::Scalar(_) = arg.layout().abi {
- 'descend_newtypes: while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
- for i in 0..arg.layout().fields.count() {
- let field = arg.value_field(fx, FieldIdx::new(i));
- if !field.layout().is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- arg = field;
- continue 'descend_newtypes;
- }
- }
-
- bug!("receiver has no non-zero-sized fields {:?}", arg);
+ while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
+ let (idx, _) = arg
+ .layout()
+ .non_1zst_field(fx)
+ .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type");
+ arg = arg.value_field(fx, FieldIdx::new(idx));
}
}
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index 6fb1cbfad..a49530ebb 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -113,7 +113,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
- PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
+ PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
PassMode::Indirect { .. } => {
argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
@@ -125,25 +125,25 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
PassMode::Pair(..) => {
- argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
- argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0));
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1));
continue;
}
- PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ PassMode::Indirect { meta_attrs: Some(_), .. } => {
unimplemented!();
}
- PassMode::Cast(ref cast, pad_i32) => {
+ PassMode::Cast { ref cast, pad_i32 } => {
// add padding
if pad_i32 {
argument_tys.push(Reg::i32().gcc_type(cx));
}
cast.gcc_type(cx)
}
- PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+ PassMode::Indirect { meta_attrs: None, on_stack: true, .. } => {
on_stack_param_indices.insert(argument_tys.len());
arg.memory_ty(cx)
},
- PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ PassMode::Indirect { meta_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
};
argument_tys.push(arg_ty);
}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 0b1f2fe6a..308cb04ca 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -821,7 +821,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let mut load = |i, scalar: &abi::Scalar, align| {
let llptr = self.struct_gep(pair_type, place.llval, i as u64);
- let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
+ let llty = place.layout.scalar_pair_element_gcc_type(self, i);
let load = self.load(llty, llptr, align);
scalar_load_metadata(self, load, scalar);
if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
index a96bd66ba..9fc77627b 100644
--- a/compiler/rustc_codegen_gcc/src/callee.rs
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -100,7 +100,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
// whether we are sharing generics or not. The important thing here is
// that the visibility we apply to the declaration is the same one that
// has been applied to the definition (wherever that definition may be).
- let is_generic = instance.args.non_erasable_generics().next().is_some();
+ let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 88dcafa73..dcebd92a6 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -7,6 +7,7 @@ use rustc_codegen_ssa::traits::{
BaseTypeMethods,
MiscMethods,
};
+use rustc_codegen_ssa::errors as ssa_errors;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::span_bug;
@@ -479,7 +480,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.sess().emit_fatal(respan(span, err.into_diagnostic()))
} else {
- span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
}
}
}
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
index a81585d41..d1bfd833c 100644
--- a/compiler/rustc_codegen_gcc/src/debuginfo.rs
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -55,7 +55,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
_llfn: RValue<'gcc>,
_mir: &mir::Body<'tcx>,
- ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+ ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
// TODO(antoyo)
None
}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index f8c32c6db..68a087a1d 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -144,7 +144,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let mut ptr = args[0].immediate();
- if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
}
let load = self.volatile_load(ptr.get_type(), ptr);
@@ -353,7 +353,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);
@@ -449,7 +449,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
}
- else if let PassMode::Cast(ref cast, _) = self.mode {
+ else if let PassMode::Cast { ref cast, .. } = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -511,10 +511,10 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
},
- PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ PassMode::Indirect { meta_attrs: Some(_), .. } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
},
- PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => {
+ PassMode::Direct(_) | PassMode::Indirect { meta_attrs: None, .. } | PassMode::Cast { .. } => {
let next_arg = next();
self.store(bx, next_arg, dst);
},
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 697ae015f..ce7e31682 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -80,7 +80,7 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMes
use rustc_fluent_macro::fluent_messages;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
+use rustc_middle::util::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{Lto, OptLevel, OutputFilenames};
use rustc_session::Session;
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index 84d578385..cc467801b 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -4,7 +4,7 @@ use gccjit::{Struct, Type};
use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
use rustc_middle::bug;
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
@@ -74,8 +74,8 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
Abi::ScalarPair(..) => {
return cx.type_struct(
&[
- layout.scalar_pair_element_gcc_type(cx, 0, false),
- layout.scalar_pair_element_gcc_type(cx, 1, false),
+ layout.scalar_pair_element_gcc_type(cx, 0),
+ layout.scalar_pair_element_gcc_type(cx, 1),
],
false,
);
@@ -150,7 +150,7 @@ pub trait LayoutGccExt<'tcx> {
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
- fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
fn gcc_field_index(&self, index: usize) -> u64;
fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
}
@@ -182,23 +182,16 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
if let Abi::Scalar(ref scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
return ty;
}
- let ty =
- match *self.ty.kind() {
- ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
- cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx))
- }
- ty::Adt(def, _) if def.is_box() => {
- cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx))
- }
- ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
- _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
- };
+ let ty = self.scalar_gcc_type_at(cx, scalar, Size::ZERO);
cx.scalar_types.borrow_mut().insert(self.ty, ty);
return ty;
}
@@ -272,23 +265,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
}
}
- fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
- // TODO(antoyo): remove llvm hack:
- // HACK(eddyb) special-case fat pointers until LLVM removes
- // pointee types, to avoid bitcasting every `OperandRef::deref`.
- match self.ty.kind() {
- ty::Ref(..) | ty::RawPtr(_) => {
- return self.field(cx, index).gcc_type(cx);
- }
- // only wide pointer boxes are handled as pointers
- // thin pointer boxes with scalar allocators are handled by the general logic below
- ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
- let ptr_ty = Ty::new_mut_ptr(cx.tcx,self.ty.boxed_ty());
- return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
- }
- _ => {}
- }
-
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> {
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
let (a, b) = match self.abi {
Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
@@ -367,8 +347,8 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
layout.gcc_field_index(index)
}
- fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
- layout.scalar_pair_element_gcc_type(self, index, immediate)
+ fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
+ layout.scalar_pair_element_gcc_type(self, index)
}
fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl
index aed4a8f3c..c0cfe39f1 100644
--- a/compiler/rustc_codegen_llvm/messages.ftl
+++ b/compiler/rustc_codegen_llvm/messages.ftl
@@ -37,6 +37,8 @@ codegen_llvm_lto_disallowed = lto can only be run for executables, cdylibs and s
codegen_llvm_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto`
+codegen_llvm_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto`
+
codegen_llvm_missing_features =
add the missing features in a `target_feature` attribute
@@ -83,6 +85,8 @@ codegen_llvm_unknown_ctarget_feature_prefix =
unknown feature specified for `-Ctarget-feature`: `{$feature}`
.note = features must begin with a `+` to enable or `-` to disable it
+codegen_llvm_unknown_debuginfo_compression = unknown debuginfo compression algorithm {$algorithm} - will fall back to uncompressed debuginfo
+
codegen_llvm_write_bytecode = failed to write bytecode to {$path}: {$err}
codegen_llvm_write_ir = failed to write LLVM IR to {$path}
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c6a7dc95d..9e834b83d 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -211,7 +211,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
- } else if let PassMode::Cast(cast, _) = &self.mode {
+ } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
@@ -274,12 +274,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_)
- | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
- | PassMode::Cast(..) => {
+ | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+ | PassMode::Cast { .. } => {
let next_arg = next();
self.store(bx, next_arg, dst);
}
@@ -332,7 +332,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
let llreturn_ty = match &self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
- PassMode::Cast(cast, _) => cast.llvm_type(cx),
+ PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
PassMode::Indirect { .. } => {
llargument_tys.push(cx.type_ptr());
cx.type_void()
@@ -340,29 +340,78 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
};
for arg in args {
+ // Note that the exact number of arguments pushed here is carefully synchronized with
+ // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+ // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
let llarg_ty = match &arg.mode {
PassMode::Ignore => continue,
- PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Direct(_) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+ // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
+ // aggregates...
+ if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
+ assert!(
+ arg.layout.is_sized(),
+ "`PassMode::Direct` for unsized type: {}",
+ arg.layout.ty
+ );
+ // This really shouldn't happen, since `immediate_llvm_type` will use
+ // `layout.fields` to turn this Rust type into an LLVM type. This means all
+ // sorts of Rust type details leak into the ABI. However wasm sadly *does*
+ // currently use this mode so we have to allow it -- but we absolutely
+ // shouldn't let any more targets do that.
+ // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+ assert!(
+ matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
+ "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+ arg.layout,
+ );
+ }
+ arg.layout.immediate_llvm_type(cx)
+ }
PassMode::Pair(..) => {
+ // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+ // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+ // LLVM IR.
+ assert!(
+ matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
+ "PassMode::Pair for type {}",
+ arg.layout.ty
+ );
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
+ // `Indirect` with metadata is only for unsized types, and doesn't work with
+ // on-stack passing.
+ assert!(arg.layout.is_unsized() && !on_stack);
+ // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+ // Any two ABI-compatible unsized types have the same metadata type and
+ // moreover the same metadata value leads to the same dynamic size and
+ // alignment, so this respects ABI compatibility.
let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
+ assert!(arg.layout.is_sized());
+ cx.type_ptr()
+ }
+ PassMode::Cast { cast, pad_i32 } => {
+ // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
+ assert!(arg.layout.is_sized());
// add padding
if *pad_i32 {
llargument_tys.push(Reg::i32().llvm_type(cx));
}
+ // Compute the LLVM type we use for this function from the cast type.
+ // We assume here that ABI-compatible Rust types have the same cast type.
cast.llvm_type(cx)
}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
};
llargument_tys.push(llarg_ty);
}
@@ -405,13 +454,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(attrs);
let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
}
_ => {}
@@ -419,25 +468,25 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(attrs);
let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
assert!(!on_stack);
apply(attrs);
- apply(extra_attrs);
+ apply(meta_attrs);
}
PassMode::Pair(a, b) => {
apply(a);
apply(b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(&ArgAttributes::new());
}
@@ -467,13 +516,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
PassMode::Direct(attrs) => {
attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
}
- PassMode::Indirect { attrs, extra_attrs: _, on_stack } => {
+ PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
assert!(!on_stack);
let i = apply(bx.cx, attrs);
let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
}
- PassMode::Cast(cast, _) => {
+ PassMode::Cast { cast, pad_i32: _ } => {
cast.attrs.apply_attrs_to_callsite(
llvm::AttributePlace::ReturnValue,
&bx.cx,
@@ -495,7 +544,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
for arg in self.args.iter() {
match &arg.mode {
PassMode::Ignore => {}
- PassMode::Indirect { attrs, extra_attrs: None, on_stack: true } => {
+ PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
let i = apply(bx.cx, attrs);
let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
attributes::apply_to_callsite(
@@ -505,18 +554,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
);
}
PassMode::Direct(attrs)
- | PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+ | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
apply(bx.cx, attrs);
}
- PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack: _ } => {
+ PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
apply(bx.cx, attrs);
- apply(bx.cx, extra_attrs);
+ apply(bx.cx, meta_attrs);
}
PassMode::Pair(a, b) => {
apply(bx.cx, a);
apply(bx.cx, b);
}
- PassMode::Cast(cast, pad_i32) => {
+ PassMode::Cast { cast, pad_i32 } => {
if *pad_i32 {
apply(bx.cx, &ArgAttributes::new());
}
@@ -571,7 +620,9 @@ impl From<Conv> for llvm::CallConv {
Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
llvm::CCallConv
}
- Conv::RustCold => llvm::ColdCallConv,
+ Conv::Cold => llvm::ColdCallConv,
+ Conv::PreserveMost => llvm::PreserveMost,
+ Conv::PreserveAll => llvm::PreserveAll,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::AvrInterrupt => llvm::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index a82d2c577..f33075a88 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -367,7 +367,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
match addition {
Addition::File { path, name_in_archive } => {
let path = CString::new(path.to_str().unwrap())?;
- let name = CString::new(name_in_archive.clone())?;
+ let name = CString::new(name_in_archive.as_bytes())?;
members.push(llvm::LLVMRustArchiveMemberNew(
path.as_ptr(),
name.as_ptr(),
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index b2d28cef8..a3b0dc6b6 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,6 +1,8 @@
-use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
+use crate::back::write::{
+ self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
use crate::errors::{
- DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
+ DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
};
use crate::llvm::{self, build_string};
use crate::{LlvmCodegenBackend, ModuleLlvm};
@@ -24,6 +26,7 @@ use std::ffi::{CStr, CString};
use std::fs::File;
use std::io;
use std::iter;
+use std::mem::ManuallyDrop;
use std::path::Path;
use std::slice;
use std::sync::Arc;
@@ -34,8 +37,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
match crate_type {
- CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true,
- CrateType::Rlib | CrateType::ProcMacro => false,
+ CrateType::Executable
+ | CrateType::Dylib
+ | CrateType::Staticlib
+ | CrateType::Cdylib
+ | CrateType::ProcMacro => true,
+ CrateType::Rlib => false,
}
}
@@ -85,6 +92,11 @@ fn prepare_lto(
diag_handler.emit_err(LtoDylib);
return Err(FatalError);
}
+ } else if *crate_type == CrateType::ProcMacro {
+ if !cgcx.opts.unstable_opts.dylib_lto {
+ diag_handler.emit_err(LtoProcMacro);
+ return Err(FatalError);
+ }
}
}
@@ -120,6 +132,7 @@ fn prepare_lto(
info!("adding bitcode from {}", name);
match get_bitcode_slice_from_object_data(
child.data(&*archive_data).expect("corrupt rlib"),
+ cgcx,
) {
Ok(data) => {
let module = SerializedModule::FromRlib(data.to_vec());
@@ -141,10 +154,29 @@ fn prepare_lto(
Ok((symbols_below_threshold, upstream_modules))
}
-fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFromRlib> {
+fn get_bitcode_slice_from_object_data<'a>(
+ obj: &'a [u8],
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+ // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+ // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+ // the relevant magic strings here and return.
+ if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+ return Ok(obj);
+ }
+ // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+ // which in the public API for sections gets treated as part of the section name, but internally
+ // in MachOObjectFile.cpp gets treated separately.
+ let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
let mut len = 0;
- let data =
- unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ let data = unsafe {
+ llvm::LLVMRustGetSliceFromObjectDataByName(
+ obj.as_ptr(),
+ obj.len(),
+ section_name.as_ptr(),
+ &mut len,
+ )
+ };
if !data.is_null() {
assert!(len != 0);
let bc = unsafe { slice::from_raw_parts(data, len) };
@@ -441,7 +473,7 @@ fn thin_lto(
for (i, (name, buffer)) in modules.into_iter().enumerate() {
info!("local module: {} - {}", i, name);
- let cname = CString::new(name.clone()).unwrap();
+ let cname = CString::new(name.as_bytes()).unwrap();
thin_modules.push(llvm::ThinLTOModule {
identifier: cname.as_ptr(),
data: buffer.data().as_ptr(),
@@ -583,7 +615,7 @@ pub(crate) fn run_pass_manager(
module: &mut ModuleCodegen<ModuleLlvm>,
thin: bool,
) -> Result<(), FatalError> {
- let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
let config = cgcx.config(module.kind);
// Now we have one massive module inside of llmod. Time to run the
@@ -705,7 +737,7 @@ pub unsafe fn optimize_thin_module(
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
let mut module = ModuleCodegen {
- module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+ module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) },
name: thin_module.name().to_string(),
kind: ModuleKind::Regular,
};
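
The two byte strings sniffed in `get_bitcode_slice_from_object_data` above are the LLVM bitcode-wrapper magic (0x0B17C0DE, stored little-endian) and the plain `BC\xC0\xDE` magic. A tiny standalone sketch of that check, with made-up sample inputs:

/// Raw LLVM bitcode starts with either the bitcode wrapper magic 0x0B17C0DE
/// (little-endian on disk) or the plain "BC\xC0\xDE" magic.
fn is_raw_llvm_bitcode(obj: &[u8]) -> bool {
    obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE")
}

fn main() {
    assert!(is_raw_llvm_bitcode(b"BC\xC0\xDErest-of-module"));
    assert!(!is_raw_llvm_bitcode(b"\x7fELF...some object file..."));
    println!("magic checks passed");
}
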
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 000000000..36484c3c3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+ ffi::{c_char, CStr},
+ marker::PhantomData,
+ ops::Deref,
+ ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing llvm::TargetMachine via ffi functions.
+/// Not cloneable as there is no clone function for llvm::TargetMachine.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+ tm_unique: NonNull<llvm::TargetMachine>,
+ phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+ pub fn new(
+ triple: &CStr,
+ cpu: &CStr,
+ features: &CStr,
+ abi: &CStr,
+ model: llvm::CodeModel,
+ reloc: llvm::RelocModel,
+ level: llvm::CodeGenOptLevel,
+ use_soft_fp: bool,
+ function_sections: bool,
+ data_sections: bool,
+ unique_section_names: bool,
+ trap_unreachable: bool,
+ singlethread: bool,
+ asm_comments: bool,
+ emit_stack_size_section: bool,
+ relax_elf_relocations: bool,
+ use_init_array: bool,
+ split_dwarf_file: &CStr,
+ output_obj_file: &CStr,
+ debug_info_compression: &CStr,
+ force_emulated_tls: bool,
+ args_cstr_buff: &[u8],
+ ) -> Result<Self, LlvmError<'static>> {
+ assert!(args_cstr_buff.len() > 0);
+ assert!(
+ *args_cstr_buff.last().unwrap() == 0,
+ "The last character must be a null terminator."
+ );
+
+ // SAFETY: llvm::LLVMRustCreateTargetMachine copies pointed to data
+ let tm_ptr = unsafe {
+ llvm::LLVMRustCreateTargetMachine(
+ triple.as_ptr(),
+ cpu.as_ptr(),
+ features.as_ptr(),
+ abi.as_ptr(),
+ model,
+ reloc,
+ level,
+ use_soft_fp,
+ function_sections,
+ data_sections,
+ unique_section_names,
+ trap_unreachable,
+ singletree,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ split_dwarf_file.as_ptr(),
+ output_obj_file.as_ptr(),
+ debug_info_compression.as_ptr(),
+ force_emulated_tls,
+ args_cstr_buff.as_ptr() as *const c_char,
+ args_cstr_buff.len(),
+ )
+ };
+
+ NonNull::new(tm_ptr)
+ .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+ .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+ }
+}
+
+impl Deref for OwnedTargetMachine {
+ type Target = llvm::TargetMachine;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+ unsafe { self.tm_unique.as_ref() }
+ }
+}
+
+impl Drop for OwnedTargetMachine {
+ fn drop(&mut self) {
+ // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+ // OwnedTargetMachine is not copyable so there is no double free or use after free
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+ }
+ }
+}
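
The new file follows the usual RAII-over-FFI pattern: `NonNull` records that the handle is never null, `PhantomData` marks ownership, and `Drop` guarantees exactly one dispose call. A generic, self-contained sketch of the same pattern, with hypothetical `ffi_create`/`ffi_destroy` functions standing in for the LLVM entry points:

use std::marker::PhantomData;
use std::ptr::NonNull;

// Hypothetical opaque FFI type and functions, standing in for llvm::TargetMachine
// and LLVMRustCreateTargetMachine / LLVMRustDisposeTargetMachine.
#[repr(C)]
struct RawHandle { _private: [u8; 0] }

unsafe fn ffi_create() -> *mut RawHandle { Box::into_raw(Box::new(RawHandle { _private: [] })) }
unsafe fn ffi_destroy(h: *mut RawHandle) { drop(Box::from_raw(h)) }

/// Owns a raw FFI handle: `NonNull` encodes non-nullness, `PhantomData` the
/// ownership, and `Drop` the single dispose call (the type is neither Clone nor Copy).
struct OwnedHandle {
    ptr: NonNull<RawHandle>,
    _owns: PhantomData<RawHandle>,
}

impl OwnedHandle {
    fn new() -> Option<Self> {
        // SAFETY: ffi_create returns either null or a valid, uniquely owned handle.
        let raw = unsafe { ffi_create() };
        NonNull::new(raw).map(|ptr| Self { ptr, _owns: PhantomData })
    }
}

impl Drop for OwnedHandle {
    fn drop(&mut self) {
        // SAFETY: `ptr` came from ffi_create and is disposed exactly once here.
        unsafe { ffi_destroy(self.ptr.as_ptr()) }
    }
}

fn main() {
    let _handle = OwnedHandle::new().expect("creation failed");
    // Dropped (and disposed) at the end of scope.
}
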
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 47cc5bd52..c778a6e01 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,17 +1,22 @@
use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
use crate::back::profiling::{
selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
};
use crate::base;
use crate::common;
use crate::errors::{
- CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
+ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+ WithLlvmError, WriteBytecode,
};
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::llvm_util;
use crate::type_::Type;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
+use llvm::{
+ LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
use rustc_codegen_ssa::back::link::ensure_removed;
use rustc_codegen_ssa::back::write::{
BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
@@ -94,8 +99,8 @@ pub fn write_output_file<'ll>(
}
}
-pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
- let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
// Can't use query system here quite yet because this function is invoked before the query
// system/tcx is set up.
let features = llvm_util::global_llvm_features(sess, false);
@@ -103,7 +108,7 @@ pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm:
.unwrap_or_else(|err| llvm_err(sess.diagnostic(), err).raise())
}
-pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
tcx.output_filenames(()).split_dwarf_path(
tcx.sess.split_debuginfo(),
@@ -113,7 +118,11 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll
} else {
None
};
- let config = TargetMachineFactoryConfig { split_dwarf_file };
+
+ let output_obj_file =
+ Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+ let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
target_machine_factory(
&tcx.sess,
tcx.backend_optimization_level(()),
@@ -216,36 +225,73 @@ pub fn target_machine_factory(
let force_emulated_tls = sess.target.force_emulated_tls;
+ // copy the exe path, followed by each argument, all into one buffer,
+ // null terminating them so we can use them as null terminated strings
+ let args_cstr_buff = {
+ let mut args_cstr_buff: Vec<u8> = Vec::new();
+ let exe_path = std::env::current_exe().unwrap_or_default();
+ let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
+
+ args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+ args_cstr_buff.push(0);
+
+ for arg in sess.expanded_args.iter() {
+ args_cstr_buff.extend_from_slice(arg.as_bytes());
+ args_cstr_buff.push(0);
+ }
+
+ args_cstr_buff
+ };
+
+ let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+ match sess.opts.debuginfo_compression {
+ rustc_session::config::DebugInfoCompression::Zlib => {
+ if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+ sess.emit_warning(UnknownCompression { algorithm: "zlib" });
+ }
+ }
+ rustc_session::config::DebugInfoCompression::Zstd => {
+ if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+ sess.emit_warning(UnknownCompression { algorithm: "zstd" });
+ }
+ }
+ rustc_session::config::DebugInfoCompression::None => {}
+ };
+ let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
Arc::new(move |config: TargetMachineFactoryConfig| {
- let split_dwarf_file =
- path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
- let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
-
- let tm = unsafe {
- llvm::LLVMRustCreateTargetMachine(
- triple.as_ptr(),
- cpu.as_ptr(),
- features.as_ptr(),
- abi.as_ptr(),
- code_model,
- reloc_model,
- opt_level,
- use_softfp,
- ffunction_sections,
- fdata_sections,
- funique_section_names,
- trap_unreachable,
- singlethread,
- asm_comments,
- emit_stack_size_section,
- relax_elf_relocations,
- use_init_array,
- split_dwarf_file.as_ptr(),
- force_emulated_tls,
- )
+ let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+ let path = path_mapping.map_prefix(path.unwrap_or_default()).0;
+ CString::new(path.to_str().unwrap()).unwrap()
};
- tm.ok_or_else(|| LlvmError::CreateTargetMachine { triple: triple.clone() })
+ let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+ let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+ OwnedTargetMachine::new(
+ &triple,
+ &cpu,
+ &features,
+ &abi,
+ code_model,
+ reloc_model,
+ opt_level,
+ use_softfp,
+ ffunction_sections,
+ fdata_sections,
+ funique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ &split_dwarf_file,
+ &output_obj_file,
+ &debuginfo_compression,
+ force_emulated_tls,
+ &args_cstr_buff,
+ )
})
}
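
The `args_cstr_buff` layout built above is simply each entry (the exe path, then every expanded argument) followed by a NUL byte, which is exactly what the asserts in `OwnedTargetMachine::new` check for. A standalone sketch of that layout (the command-line entries are made up):

/// Each entry is appended and terminated by a NUL byte, so the whole buffer can be
/// handed to C++ as a single pointer + length and split back on the NULs.
fn build_nul_separated(entries: &[&str]) -> Vec<u8> {
    let mut buf = Vec::new();
    for entry in entries {
        buf.extend_from_slice(entry.as_bytes());
        buf.push(0);
    }
    buf
}

fn main() {
    let buf = build_nul_separated(&["rustc", "--edition=2021", "main.rs"]);
    // The invariants asserted by OwnedTargetMachine::new: non-empty and NUL-terminated.
    assert!(!buf.is_empty());
    assert_eq!(*buf.last().unwrap(), 0);
    // Splitting on NULs recovers the entries (plus one empty trailing piece).
    let pieces: Vec<_> = buf.split(|&b| b == 0).collect();
    assert_eq!(pieces[0], &b"rustc"[..]);
    println!("{} entries", pieces.len() - 1);
}
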
@@ -853,6 +899,27 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
asm
}
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+ cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos")
+ || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+ cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+ if target_is_apple(cgcx) {
+ "__LLVM,__bitcode\0"
+ } else if target_is_aix(cgcx) {
+ ".ipa\0"
+ } else {
+ ".llvmbc\0"
+ }
+}
+
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
@@ -913,11 +980,8 @@ unsafe fn embed_bitcode(
// Unfortunately, LLVM provides no way to set custom section flags. For ELF
// and COFF we emit the sections using module level inline assembly for that
// reason (see issue #90326 for historical background).
- let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
- let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
- || cgcx.opts.target_triple.triple().contains("-darwin")
- || cgcx.opts.target_triple.triple().contains("-tvos")
- || cgcx.opts.target_triple.triple().contains("-watchos");
+ let is_aix = target_is_aix(cgcx);
+ let is_apple = target_is_apple(cgcx);
if is_apple
|| is_aix
|| cgcx.opts.target_triple.triple().starts_with("wasm")
@@ -932,13 +996,7 @@ unsafe fn embed_bitcode(
);
llvm::LLVMSetInitializer(llglobal, llconst);
- let section = if is_apple {
- "__LLVM,__bitcode\0"
- } else if is_aix {
- ".ipa\0"
- } else {
- ".llvmbc\0"
- };
+ let section = bitcode_section_name(cgcx);
llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
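
Each name returned by `bitcode_section_name` carries an embedded trailing NUL so it can be passed to LLVM as a C string, and the LTO reader earlier in this patch strips the Mach-O `__LLVM,` segment prefix before looking the section up by name. A small sketch of that consumption (trimming the NUL below is only so the assert can compare plain names):

fn section_name_for(triple: &str) -> &'static str {
    if triple.contains("-darwin")
        || triple.contains("-ios")
        || triple.contains("-tvos")
        || triple.contains("-watchos")
    {
        "__LLVM,__bitcode\0"
    } else if triple.contains("-aix") {
        ".ipa\0"
    } else {
        ".llvmbc\0"
    }
}

fn main() {
    let raw = section_name_for("aarch64-apple-darwin");
    // The trailing NUL stays in place for the FFI call; it is trimmed here only
    // for the comparison.
    let for_lookup = raw.trim_start_matches("__LLVM,").trim_end_matches('\0');
    assert_eq!(for_lookup, "__bitcode");
    assert_eq!(section_name_for("x86_64-unknown-linux-gnu"), ".llvmbc\0");
    println!("Apple lookup name: {for_lookup}");
}
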
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
index 36c098218..5254c3f9c 100644
--- a/compiler/rustc_codegen_llvm/src/callee.rs
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -95,7 +95,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
- let is_generic = instance.args.non_erasable_generics().next().is_some();
+ let is_generic =
+ instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 24fd5bbf8..b4b2ab1e1 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -10,6 +10,7 @@ use crate::value::Value;
use cstr::cstr;
use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::fx::FxHashMap;
@@ -159,9 +160,9 @@ pub unsafe fn create_module<'ll>(
// Ensure the data-layout values hardcoded remain the defaults.
if sess.target.is_builtin {
+ // tm is disposed by its drop impl
let tm = crate::back::write::create_informational_target_machine(tcx.sess);
- llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
- llvm::LLVMRustDisposeTargetMachine(tm);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
@@ -1000,7 +1001,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() })
} else {
- span_bug!(span, "failed to get layout for `{ty}`: {err:?}")
+ self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
}
}
}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 7a82d05ce..763186a58 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::coverage::{CounterId, MappedExpressionIndex};
+use rustc_middle::mir::coverage::{CounterId, ExpressionId, Operand};
/// Must match the layout of `LLVMRustCounterKind`.
#[derive(Copy, Clone, Debug)]
@@ -30,11 +30,8 @@ pub struct Counter {
}
impl Counter {
- /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
- /// `id` is not used.
- pub fn zero() -> Self {
- Self { kind: CounterKind::Zero, id: 0 }
- }
+ /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+ pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
/// Constructs a new `Counter` of kind `CounterValueReference`.
pub fn counter_value_reference(counter_id: CounterId) -> Self {
@@ -42,20 +39,16 @@ impl Counter {
}
/// Constructs a new `Counter` of kind `Expression`.
- pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
- Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
- }
-
- /// Returns true if the `Counter` kind is `Zero`.
- pub fn is_zero(&self) -> bool {
- matches!(self.kind, CounterKind::Zero)
+ pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+ Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
}
- /// An explicitly-named function to get the ID value, making it more obvious
- /// that the stored value is now 0-based.
- pub fn zero_based_id(&self) -> u32 {
- debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
- self.id
+ pub(crate) fn from_operand(operand: Operand) -> Self {
+ match operand {
+ Operand::Zero => Self::ZERO,
+ Operand::Counter(id) => Self::counter_value_reference(id),
+ Operand::Expression(id) => Self::expression(id),
+ }
}
}
@@ -81,6 +74,11 @@ pub struct CounterExpression {
}
impl CounterExpression {
+ /// The dummy expression `(0 - 0)` has a representation of all zeroes,
+ /// making it marginally more efficient to initialize than `(0 + 0)`.
+ pub(crate) const DUMMY: Self =
+ Self { lhs: Counter::ZERO, kind: ExprKind::Subtract, rhs: Counter::ZERO };
+
pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
Self { kind, lhs, rhs }
}
@@ -172,7 +170,7 @@ impl CounterMappingRegion {
) -> Self {
Self {
counter,
- false_counter: Counter::zero(),
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
@@ -220,8 +218,8 @@ impl CounterMappingRegion {
end_col: u32,
) -> Self {
Self {
- counter: Counter::zero(),
- false_counter: Counter::zero(),
+ counter: Counter::ZERO,
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id,
start_line,
@@ -243,8 +241,8 @@ impl CounterMappingRegion {
end_col: u32,
) -> Self {
Self {
- counter: Counter::zero(),
- false_counter: Counter::zero(),
+ counter: Counter::ZERO,
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
@@ -268,7 +266,7 @@ impl CounterMappingRegion {
) -> Self {
Self {
counter,
- false_counter: Counter::zero(),
+ false_counter: Counter::ZERO,
file_id,
expanded_file_id: 0,
start_line,
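
`Counter::from_operand` replaces the old per-call-site conversions with a single total mapping from MIR coverage operands to FFI counters. A self-contained sketch with simplified stand-ins for `Operand` and `Counter`:

// Simplified stand-ins for the rustc_middle Operand and the FFI Counter above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Operand {
    Zero,
    Counter(u32),
    Expression(u32),
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum CounterKind {
    Zero,
    CounterValueReference,
    Expression,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct Counter {
    kind: CounterKind,
    id: u32,
}

impl Counter {
    // For the Zero kind the id field is meaningless, so a single const suffices.
    const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };

    fn from_operand(operand: Operand) -> Self {
        match operand {
            Operand::Zero => Self::ZERO,
            Operand::Counter(id) => Self { kind: CounterKind::CounterValueReference, id },
            Operand::Expression(id) => Self { kind: CounterKind::Expression, id },
        }
    }
}

fn main() {
    assert_eq!(Counter::from_operand(Operand::Zero), Counter::ZERO);
    assert_eq!(Counter::from_operand(Operand::Counter(3)).id, 3);
    println!("operand -> counter mapping ok");
}
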
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index f1e68af25..e83110dca 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -1,10 +1,8 @@
use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
-use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::bug;
-use rustc_middle::mir::coverage::{
- CodeRegion, CounterId, ExpressionId, MappedExpressionIndex, Op, Operand,
-};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, ExpressionId, Op, Operand};
use rustc_middle::ty::Instance;
use rustc_middle::ty::TyCtxt;
@@ -128,6 +126,58 @@ impl<'tcx> FunctionCoverage<'tcx> {
self.unreachable_regions.push(region)
}
+ /// Perform some simplifications to make the final coverage mappings
+ /// slightly smaller.
+ ///
+ /// This method mainly exists to preserve the simplifications that were
+ /// already being performed by the Rust-side expression renumbering, so that
+ /// the resulting coverage mappings don't get worse.
+ pub(crate) fn simplify_expressions(&mut self) {
+ // The set of expressions that either were optimized out entirely, or
+ // have zero as both of their operands, and will therefore always have
+ // a value of zero. Other expressions that refer to these as operands
+ // can have those operands replaced with `Operand::Zero`.
+ let mut zero_expressions = FxIndexSet::default();
+
+ // For each expression, perform simplifications based on lower-numbered
+ // expressions, and then update the set of always-zero expressions if
+ // necessary.
+ // (By construction, expressions can only refer to other expressions
+ // that have lower IDs, so one simplification pass is sufficient.)
+ for (id, maybe_expression) in self.expressions.iter_enumerated_mut() {
+ let Some(expression) = maybe_expression else {
+ // If an expression is missing, it must have been optimized away,
+ // so any operand that refers to it can be replaced with zero.
+ zero_expressions.insert(id);
+ continue;
+ };
+
+ // If an operand refers to an expression that is always zero, then
+ // that operand can be replaced with `Operand::Zero`.
+ let maybe_set_operand_to_zero = |operand: &mut Operand| match &*operand {
+ Operand::Expression(id) if zero_expressions.contains(id) => {
+ *operand = Operand::Zero;
+ }
+ _ => (),
+ };
+ maybe_set_operand_to_zero(&mut expression.lhs);
+ maybe_set_operand_to_zero(&mut expression.rhs);
+
+ // Coverage counter values cannot be negative, so if an expression
+ // involves subtraction from zero, assume that its RHS must also be zero.
+ // (Do this after simplifications that could set the LHS to zero.)
+ if let Expression { lhs: Operand::Zero, op: Op::Subtract, .. } = expression {
+ expression.rhs = Operand::Zero;
+ }
+
+ // After the above simplifications, if both operands are zero, then
+ // we know that this expression is always zero too.
+ if let Expression { lhs: Operand::Zero, rhs: Operand::Zero, .. } = expression {
+ zero_expressions.insert(id);
+ }
+ }
+ }
+
/// Return the source hash, generated from the HIR node structure, and used to indicate whether
/// or not the source code structure changed between different compilations.
pub fn source_hash(&self) -> u64 {
@@ -146,8 +196,14 @@ impl<'tcx> FunctionCoverage<'tcx> {
self.instance
);
+ let counter_expressions = self.counter_expressions();
+ // Expression IDs are indices into `self.expressions`, and on the LLVM
+ // side they will be treated as indices into `counter_expressions`, so
+ // the two vectors should correspond 1:1.
+ assert_eq!(self.expressions.len(), counter_expressions.len());
+
let counter_regions = self.counter_regions();
- let (counter_expressions, expression_regions) = self.expressions_with_regions();
+ let expression_regions = self.expression_regions();
let unreachable_regions = self.unreachable_regions();
let counter_regions =
@@ -163,149 +219,53 @@ impl<'tcx> FunctionCoverage<'tcx> {
})
}
- fn expressions_with_regions(
- &self,
- ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
- let mut counter_expressions = Vec::with_capacity(self.expressions.len());
- let mut expression_regions = Vec::with_capacity(self.expressions.len());
- let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+ /// Convert this function's coverage expression data into a form that can be
+ /// passed through FFI to LLVM.
+ fn counter_expressions(&self) -> Vec<CounterExpression> {
+ // We know that LLVM will optimize out any unused expressions before
+ // producing the final coverage map, so there's no need to do the same
+ // thing on the Rust side unless we're confident we can do much better.
+ // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
- // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
- // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
- // and value.
- //
- // Expressions will be returned from this function in a sequential vector (array) of
- // `CounterExpression`, so the expression IDs must be mapped from their original,
- // potentially sparse set of indexes.
- //
- // An `Expression` as an operand will have already been encountered as an `Expression` with
- // operands, so its new_index will already have been generated (as a 1-up index value).
- // (If an `Expression` as an operand does not have a corresponding new_index, it was
- // probably optimized out, after the expression was injected into the MIR, so it will
- // get a `CounterKind::Zero` instead.)
- //
- // In other words, an `Expression`s at any given index can include other expressions as
- // operands, but expression operands can only come from the subset of expressions having
- // `expression_index`s lower than the referencing `Expression`. Therefore, it is
- // reasonable to look up the new index of an expression operand while the `new_indexes`
- // vector is only complete up to the current `ExpressionIndex`.
- type NewIndexes = IndexSlice<ExpressionId, Option<MappedExpressionIndex>>;
- let id_to_counter = |new_indexes: &NewIndexes, operand: Operand| match operand {
- Operand::Zero => Some(Counter::zero()),
- Operand::Counter(id) => Some(Counter::counter_value_reference(id)),
- Operand::Expression(id) => {
- self.expressions
- .get(id)
- .expect("expression id is out of range")
- .as_ref()
- // If an expression was optimized out, assume it would have produced a count
- // of zero. This ensures that expressions dependent on optimized-out
- // expressions are still valid.
- .map_or(Some(Counter::zero()), |_| new_indexes[id].map(Counter::expression))
- }
- };
-
- for (original_index, expression) in
- self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
- // Option::map() will return None to filter out missing expressions. This may happen
- // if, for example, a MIR-instrumented expression is removed during an optimization.
- entry.as_ref().map(|expression| (original_index, expression))
- })
- {
- let optional_region = &expression.region;
- let Expression { lhs, op, rhs, .. } = *expression;
-
- if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
- .map(|lhs_counter| {
- id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
- })
- {
- if lhs_counter.is_zero() && op.is_subtract() {
- // The left side of a subtraction was probably optimized out. As an example,
- // a branch condition might be evaluated as a constant expression, and the
- // branch could be removed, dropping unused counters in the process.
- //
- // Since counters are unsigned, we must assume the result of the expression
- // can be no more and no less than zero. An expression known to evaluate to zero
- // does not need to be added to the coverage map.
- //
- // Coverage test `loops_branches.rs` includes multiple variations of branches
- // based on constant conditional (literal `true` or `false`), and demonstrates
- // that the expected counts are still correct.
- debug!(
- "Expression subtracts from zero (assume unreachable): \
- original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
- original_index, lhs, op, rhs, optional_region,
- );
- rhs_counter = Counter::zero();
+ self.expressions
+ .iter()
+ .map(|expression| match expression {
+ None => {
+ // This expression ID was allocated, but we never saw the
+ // actual expression, so it must have been optimized out.
+ // Replace it with a dummy expression, and let LLVM take
+ // care of omitting it from the expression list.
+ CounterExpression::DUMMY
}
- debug_assert!(
- lhs_counter.is_zero()
- // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
- || ((lhs_counter.zero_based_id() as usize)
- <= usize::max(self.counters.len(), self.expressions.len())),
- "lhs id={} > both counters.len()={} and expressions.len()={}
- ({:?} {:?} {:?})",
- lhs_counter.zero_based_id(),
- self.counters.len(),
- self.expressions.len(),
- lhs_counter,
- op,
- rhs_counter,
- );
-
- debug_assert!(
- rhs_counter.is_zero()
- // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
- || ((rhs_counter.zero_based_id() as usize)
- <= usize::max(self.counters.len(), self.expressions.len())),
- "rhs id={} > both counters.len()={} and expressions.len()={}
- ({:?} {:?} {:?})",
- rhs_counter.zero_based_id(),
- self.counters.len(),
- self.expressions.len(),
- lhs_counter,
- op,
- rhs_counter,
- );
-
- // Both operands exist. `Expression` operands exist in `self.expressions` and have
- // been assigned a `new_index`.
- let mapped_expression_index =
- MappedExpressionIndex::from(counter_expressions.len());
- let expression = CounterExpression::new(
- lhs_counter,
- match op {
- Op::Add => ExprKind::Add,
- Op::Subtract => ExprKind::Subtract,
- },
- rhs_counter,
- );
- debug!(
- "Adding expression {:?} = {:?}, region: {:?}",
- mapped_expression_index, expression, optional_region
- );
- counter_expressions.push(expression);
- new_indexes[original_index] = Some(mapped_expression_index);
- if let Some(region) = optional_region {
- expression_regions.push((Counter::expression(mapped_expression_index), region));
+ &Some(Expression { lhs, op, rhs, .. }) => {
+ // Convert the operands and operator as normal.
+ CounterExpression::new(
+ Counter::from_operand(lhs),
+ match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ Counter::from_operand(rhs),
+ )
}
- } else {
- bug!(
- "expression has one or more missing operands \
- original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
- original_index,
- lhs,
- op,
- rhs,
- optional_region,
- );
- }
- }
- (counter_expressions, expression_regions.into_iter())
+ })
+ .collect::<Vec<_>>()
+ }
+
+ fn expression_regions(&self) -> Vec<(Counter, &CodeRegion)> {
+ // Find all of the expression IDs that weren't optimized out AND have
+ // an attached code region, and return the corresponding mapping as a
+ // counter/region pair.
+ self.expressions
+ .iter_enumerated()
+ .filter_map(|(id, expression)| {
+ let code_region = expression.as_ref()?.region.as_ref()?;
+ Some((Counter::expression(id), code_region))
+ })
+ .collect::<Vec<_>>()
}
fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
- self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+ self.unreachable_regions.iter().map(|region| (Counter::ZERO, region))
}
}
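
`simplify_expressions` above works in one forward pass because an expression can only refer to lower-numbered expressions: missing or all-zero expressions are recorded, later operands that refer to them are rewritten to zero, and `0 - x` forces `x` to zero as well. A self-contained sketch of that pass with simplified types:

use std::collections::HashSet;

// Simplified stand-ins for the coverage types used by simplify_expressions above.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Operand { Zero, Counter(u32), Expression(usize) }

#[derive(Clone, Copy, PartialEq, Debug)]
enum Op { Add, Subtract }

#[derive(Clone, Copy, Debug)]
struct Expression { lhs: Operand, op: Op, rhs: Operand }

/// One forward pass suffices because an expression may only refer to
/// lower-numbered expressions.
fn simplify(expressions: &mut [Option<Expression>]) {
    let mut zero: HashSet<usize> = HashSet::new();
    for id in 0..expressions.len() {
        let Some(expr) = &mut expressions[id] else {
            // Optimized out: anything referring to it can be treated as zero.
            zero.insert(id);
            continue;
        };
        let fold = |operand: &mut Operand| {
            if let Operand::Expression(i) = operand {
                if zero.contains(i) { *operand = Operand::Zero; }
            }
        };
        fold(&mut expr.lhs);
        fold(&mut expr.rhs);
        // Counters are unsigned, so `0 - rhs` can only ever be zero.
        if expr.lhs == Operand::Zero && expr.op == Op::Subtract {
            expr.rhs = Operand::Zero;
        }
        if expr.lhs == Operand::Zero && expr.rhs == Operand::Zero {
            zero.insert(id);
        }
    }
}

fn main() {
    let mut exprs = vec![
        None, // expression 0 was optimized out
        Some(Expression { lhs: Operand::Expression(0), op: Op::Add, rhs: Operand::Zero }),
        Some(Expression { lhs: Operand::Expression(1), op: Op::Subtract, rhs: Operand::Counter(7) }),
    ];
    simplify(&mut exprs);
    // 1 collapses to zero, so 2 becomes `0 - c7`, whose RHS is then forced to zero too.
    assert_eq!(exprs[2].unwrap().rhs, Operand::Zero);
    println!("{exprs:?}");
}
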
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 97a99e510..d4e775256 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,13 +1,14 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
-use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::FunctionCoverage;
use crate::llvm;
use rustc_codegen_ssa::traits::ConstMethods;
use rustc_data_structures::fx::FxIndexSet;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
-use rustc_llvm::RustString;
+use rustc_index::IndexVec;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::CodeRegion;
@@ -55,21 +56,21 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
return;
}
- let mut mapgen = CoverageMapGenerator::new(tcx);
+ let mut global_file_table = GlobalFileTable::new(tcx);
// Encode coverage mappings and generate function records
let mut function_data = Vec::new();
- for (instance, function_coverage) in function_coverage_map {
+ for (instance, mut function_coverage) in function_coverage_map {
debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+ function_coverage.simplify_expressions();
+ let function_coverage = function_coverage;
+
let mangled_function_name = tcx.symbol_name(instance).name;
let source_hash = function_coverage.source_hash();
let is_used = function_coverage.is_used();
- let (expressions, counter_regions) =
- function_coverage.get_expressions_and_counter_regions();
- let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
- mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
- });
+ let coverage_mapping_buffer =
+ encode_mappings_for_function(&mut global_file_table, &function_coverage);
if coverage_mapping_buffer.is_empty() {
if function_coverage.is_used() {
@@ -87,19 +88,14 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
}
// Encode all filenames referenced by counters/expressions in this module
- let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
- coverageinfo::write_filenames_section_to_buffer(
- mapgen.filenames.iter().map(Symbol::as_str),
- filenames_buffer,
- );
- });
+ let filenames_buffer = global_file_table.into_filenames_buffer();
let filenames_size = filenames_buffer.len();
let filenames_val = cx.const_bytes(&filenames_buffer);
let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
// Generate the LLVM IR representation of the coverage map and store it in a well-known global
- let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+ let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
let covfun_section_name = coverageinfo::covfun_section_name(cx);
for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
@@ -118,13 +114,13 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
}
-struct CoverageMapGenerator {
- filenames: FxIndexSet<Symbol>,
+struct GlobalFileTable {
+ global_file_table: FxIndexSet<Symbol>,
}
-impl CoverageMapGenerator {
+impl GlobalFileTable {
fn new(tcx: TyCtxt<'_>) -> Self {
- let mut filenames = FxIndexSet::default();
+ let mut global_file_table = FxIndexSet::default();
// LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
// requires setting the first filename to the compilation directory.
// Since rustc generates coverage maps with relative paths, the
@@ -133,94 +129,114 @@ impl CoverageMapGenerator {
let working_dir = Symbol::intern(
&tcx.sess.opts.working_dir.remapped_path_if_available().to_string_lossy(),
);
- filenames.insert(working_dir);
- Self { filenames }
+ global_file_table.insert(working_dir);
+ Self { global_file_table }
}
- /// Using the `expressions` and `counter_regions` collected for the current function, generate
- /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
- /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
- /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
- fn write_coverage_mapping<'a>(
- &mut self,
- expressions: Vec<CounterExpression>,
- counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
- coverage_mapping_buffer: &RustString,
- ) {
- let mut counter_regions = counter_regions.collect::<Vec<_>>();
- if counter_regions.is_empty() {
- return;
- }
+ fn global_file_id_for_file_name(&mut self, file_name: Symbol) -> u32 {
+ let (global_file_id, _) = self.global_file_table.insert_full(file_name);
+ global_file_id as u32
+ }
- let mut virtual_file_mapping = Vec::new();
- let mut mapping_regions = Vec::new();
- let mut current_file_name = None;
- let mut current_file_id = 0;
-
- // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
- // by filename and position. Capture any new files to compute the `CounterMappingRegion`s
- // `file_id` (indexing files referenced by the current function), and construct the
- // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
- // `filenames` array.
- counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
- for (counter, region) in counter_regions {
- let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
- let same_file = current_file_name.is_some_and(|p| p == file_name);
- if !same_file {
- if current_file_name.is_some() {
- current_file_id += 1;
- }
- current_file_name = Some(file_name);
- debug!(" file_id: {} = '{:?}'", current_file_id, file_name);
- let (filenames_index, _) = self.filenames.insert_full(file_name);
- virtual_file_mapping.push(filenames_index as u32);
- }
- debug!("Adding counter {:?} to map for {:?}", counter, region);
+ fn into_filenames_buffer(self) -> Vec<u8> {
+ // This method takes `self` so that the caller can't accidentally
+ // modify the original file table after encoding it into a buffer.
+
+ llvm::build_byte_buffer(|buffer| {
+ coverageinfo::write_filenames_section_to_buffer(
+ self.global_file_table.iter().map(Symbol::as_str),
+ buffer,
+ );
+ })
+ }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+ global_file_table: &mut GlobalFileTable,
+ function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+ let (expressions, counter_regions) = function_coverage.get_expressions_and_counter_regions();
+
+ let mut counter_regions = counter_regions.collect::<Vec<_>>();
+ if counter_regions.is_empty() {
+ return Vec::new();
+ }
+
+ let mut virtual_file_mapping = IndexVec::<u32, u32>::new();
+ let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+ // Sort the list of (counter, region) mapping pairs by region, so that they
+ // can be grouped by filename. Prepare file IDs for each filename, and
+ // prepare the mapping data so that we can pass it through FFI to LLVM.
+ counter_regions.sort_by_key(|(_counter, region)| *region);
+ for counter_regions_for_file in
+ counter_regions.group_by(|(_, a), (_, b)| a.file_name == b.file_name)
+ {
+ // Look up (or allocate) the global file ID for this filename.
+ let file_name = counter_regions_for_file[0].1.file_name;
+ let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+ // Associate that global file ID with a local file ID for this function.
+ let local_file_id: u32 = virtual_file_mapping.push(global_file_id);
+ debug!(" file id: local {local_file_id} => global {global_file_id} = '{file_name:?}'");
+
+ // For each counter/region pair in this function+file, convert it to a
+ // form suitable for FFI.
+ for &(counter, region) in counter_regions_for_file {
+ let CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = *region;
+
+ debug!("Adding counter {counter:?} to map for {region:?}");
mapping_regions.push(CounterMappingRegion::code_region(
counter,
- current_file_id,
+ local_file_id,
start_line,
start_col,
end_line,
end_col,
));
}
+ }
- // Encode and append the current function's coverage mapping data
+ // Encode the function's coverage mappings into a buffer.
+ llvm::build_byte_buffer(|buffer| {
coverageinfo::write_mapping_to_buffer(
- virtual_file_mapping,
+ virtual_file_mapping.raw,
expressions,
mapping_regions,
- coverage_mapping_buffer,
+ buffer,
);
- }
+ })
+}
- /// Construct coverage map header and the array of function records, and combine them into the
- /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
- /// specific, well-known section and name.
- fn generate_coverage_map<'ll>(
- self,
- cx: &CodegenCx<'ll, '_>,
- version: u32,
- filenames_size: usize,
- filenames_val: &'ll llvm::Value,
- ) -> &'ll llvm::Value {
- debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
-
- // Create the coverage data header (Note, fields 0 and 2 are now always zero,
- // as of `llvm::coverage::CovMapVersion::Version4`.)
- let zero_was_n_records_val = cx.const_u32(0);
- let filenames_size_val = cx.const_u32(filenames_size as u32);
- let zero_was_coverage_size_val = cx.const_u32(0);
- let version_val = cx.const_u32(version);
- let cov_data_header_val = cx.const_struct(
- &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
- /*packed=*/ false,
- );
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ version: u32,
+ filenames_size: usize,
+ filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+ debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+ // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+ // as of `llvm::coverage::CovMapVersion::Version4`.)
+ let zero_was_n_records_val = cx.const_u32(0);
+ let filenames_size_val = cx.const_u32(filenames_size as u32);
+ let zero_was_coverage_size_val = cx.const_u32(0);
+ let version_val = cx.const_u32(version);
+ let cov_data_header_val = cx.const_struct(
+ &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+ /*packed=*/ false,
+ );
- // Create the complete LLVM coverage data value to add to the LLVM IR
- cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
- }
+ // Create the complete LLVM coverage data value to add to the LLVM IR
+ cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
}
/// Construct a function record and combine it with the function's coverage mapping data.
@@ -317,10 +333,10 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
{
let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
- // If a function is marked `#[no_coverage]`, then skip generating a
+ // If a function is marked `#[coverage(off)]`, then skip generating a
// dead code stub for it.
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
- debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+ debug!("skipping unused fn marked #[coverage(off)]: {:?}", non_codegenned_def_id);
continue;
}
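
`GlobalFileTable` above leans on `FxIndexSet::insert_full`, which returns a stable insertion-order index that doubles as the global file ID; each function then builds its own local-to-global `virtual_file_mapping`. A minimal sketch of the same idea using only std types (a `HashMap` plus a `Vec` emulating the index set):

use std::collections::HashMap;

/// Minimal stand-in for the FxIndexSet-based GlobalFileTable: interning a filename
/// returns a stable, insertion-ordered global ID (what `insert_full` provides).
#[derive(Default)]
struct GlobalFileTable {
    ids: HashMap<String, u32>,
    names: Vec<String>,
}

impl GlobalFileTable {
    fn global_file_id(&mut self, name: &str) -> u32 {
        if let Some(&id) = self.ids.get(name) {
            return id;
        }
        let id = self.names.len() as u32;
        self.names.push(name.to_string());
        self.ids.insert(name.to_string(), id);
        id
    }
}

fn main() {
    let mut table = GlobalFileTable::default();

    // Per-function "virtual file mapping": local file id (the index) -> global file id.
    let mut virtual_file_mapping: Vec<u32> = Vec::new();
    for file in ["lib.rs", "util.rs", "lib.rs"] {
        let global = table.global_file_id(file);
        if !virtual_file_mapping.contains(&global) {
            virtual_file_mapping.push(global);
        }
    }

    assert_eq!(table.names, ["lib.rs", "util.rs"]);
    assert_eq!(virtual_file_mapping, [0, 1]);
    println!("global files: {:?}", table.names);
}
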
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 621fd36b2..c70cb670e 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -16,7 +16,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_llvm::RustString;
use rustc_middle::bug;
-use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand};
+use rustc_middle::mir::coverage::{CounterId, CoverageKind};
use rustc_middle::mir::Coverage;
use rustc_middle::ty;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
@@ -104,144 +104,67 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
let bx = self;
+ let Some(coverage_context) = bx.coverage_context() else { return };
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ let func_coverage = coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(bx.tcx(), instance));
+
let Coverage { kind, code_region } = coverage.clone();
match kind {
CoverageKind::Counter { function_source_hash, id } => {
- if bx.set_function_source_hash(instance, function_source_hash) {
- // If `set_function_source_hash()` returned true, the coverage map is enabled,
- // so continue adding the counter.
- if let Some(code_region) = code_region {
- // Note: Some counters do not have code regions, but may still be referenced
- // from expressions. In that case, don't add the counter to the coverage map,
- // but do inject the counter intrinsic.
- bx.add_coverage_counter(instance, id, code_region);
- }
-
- let coverageinfo = bx.tcx().coverageinfo(instance.def);
-
- let fn_name = bx.get_pgo_func_name_var(instance);
- let hash = bx.const_u64(function_source_hash);
- let num_counters = bx.const_u32(coverageinfo.num_counters);
- let index = bx.const_u32(id.as_u32());
+ debug!(
+ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+ instance, function_source_hash,
+ );
+ func_coverage.set_function_source_hash(function_source_hash);
+
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
debug!(
- "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
- fn_name, hash, num_counters, index,
+ "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+ instance, id, code_region,
);
- bx.instrprof_increment(fn_name, hash, num_counters, index);
+ func_coverage.add_counter(id, code_region);
}
+ // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+ // as that needs an exclusive borrow.
+ drop(coverage_map);
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.as_u32());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
}
CoverageKind::Expression { id, lhs, op, rhs } => {
- bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ debug!(
+ "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; region: {:?}",
+ instance, id, lhs, op, rhs, code_region,
+ );
+ func_coverage.add_counter_expression(id, lhs, op, rhs, code_region);
}
CoverageKind::Unreachable => {
- bx.add_coverage_unreachable(
- instance,
- code_region.expect("unreachable regions always have code regions"),
+ let code_region =
+ code_region.expect("unreachable regions always have code regions");
+ debug!(
+ "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+ instance, code_region,
);
+ func_coverage.add_unreachable_region(code_region);
}
}
}
}
-// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
-// after moving most coverage code out of SSA they are now just ordinary methods.
-impl<'tcx> Builder<'_, '_, 'tcx> {
- /// Returns true if the function source hash was added to the coverage map (even if it had
- /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
- /// not enabled (a coverage map is not being generated).
- fn set_function_source_hash(
- &mut self,
- instance: Instance<'tcx>,
- function_source_hash: u64,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "ensuring function source hash is set for instance={:?}; function_source_hash={}",
- instance, function_source_hash,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .set_function_source_hash(function_source_hash);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
- /// is not enabled (a coverage map is not being generated).
- fn add_coverage_counter(
- &mut self,
- instance: Instance<'tcx>,
- id: CounterId,
- region: CodeRegion,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
- instance, id, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter(id, region);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the expression was added to the coverage map; false if
- /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
- fn add_coverage_counter_expression(
- &mut self,
- instance: Instance<'tcx>,
- id: ExpressionId,
- lhs: Operand,
- op: Op,
- rhs: Operand,
- region: Option<CodeRegion>,
- ) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
- region: {:?}",
- instance, id, lhs, op, rhs, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter_expression(id, lhs, op, rhs, region);
- true
- } else {
- false
- }
- }
-
- /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
- /// is not enabled (a coverage map is not being generated).
- fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
- if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding unreachable code to coverage_map: instance={:?}, at {:?}",
- instance, region,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_unreachable_region(region);
- true
- } else {
- false
- }
- }
-}
-
fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
let tcx = cx.tcx;
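
The explicit `drop(coverage_map)` in the hunk above matters because the `RefMut` borrows out of the builder's coverage context, and `instrprof_increment` needs the builder mutably afterwards. A toy sketch of that borrow shape with hypothetical types:

use std::cell::RefCell;
use std::collections::HashMap;

struct Builder {
    coverage: RefCell<HashMap<&'static str, u32>>,
    emitted: u32,
}

impl Builder {
    // Like `coverage_context()`: hands out shared access to interior-mutable state.
    fn coverage_map(&self) -> &RefCell<HashMap<&'static str, u32>> {
        &self.coverage
    }

    // Like `instrprof_increment()`: needs exclusive access to the builder itself.
    fn instrprof_increment(&mut self) {
        self.emitted += 1;
    }
}

fn main() {
    let mut bx = Builder { coverage: RefCell::new(HashMap::new()), emitted: 0 };

    let mut map = bx.coverage_map().borrow_mut();
    *map.entry("fn_a").or_insert(0) += 1;

    // The RefMut keeps `bx` borrowed (it points into `bx.coverage`), so it must be
    // dropped before the `&mut bx` call below; this mirrors the drop(coverage_map) above.
    drop(map);

    bx.instrprof_increment();
    println!("counters: {:?}, emitted: {}", bx.coverage.borrow(), bx.emitted);
}
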
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index d174a3593..aff764f02 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -20,7 +20,7 @@ pub fn compute_mir_scopes<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
mir: &Body<'tcx>,
- debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
) {
// Find all scopes with variables defined in them.
let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
@@ -51,7 +51,7 @@ fn make_mir_scope<'ll, 'tcx>(
instance: Instance<'tcx>,
mir: &Body<'tcx>,
variables: &Option<BitSet<SourceScope>>,
- debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
instantiated: &mut BitSet<SourceScope>,
scope: SourceScope,
) {
@@ -68,7 +68,7 @@ fn make_mir_scope<'ll, 'tcx>(
let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
file_start_pos: file.start_pos,
- file_end_pos: file.end_pos,
+ file_end_pos: file.end_position(),
..debug_context.scopes[scope]
};
instantiated.insert(scope);
@@ -86,27 +86,31 @@ fn make_mir_scope<'ll, 'tcx>(
let loc = cx.lookup_debug_loc(scope_data.span.lo());
let file_metadata = file_metadata(cx, &loc.file);
- let dbg_scope = match scope_data.inlined {
+ let parent_dbg_scope = match scope_data.inlined {
Some((callee, _)) => {
// FIXME(eddyb) this would be `self.monomorphize(&callee)`
// if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
- let callee = cx.tcx.subst_and_normalize_erasing_regions(
+ let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
instance.args,
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(callee),
);
- let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
- cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+ let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+ cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ })
}
- None => unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope.dbg_scope,
- file_metadata,
- loc.line,
- loc.col,
- )
- },
+ None => parent_scope.dbg_scope,
+ };
+
+ let dbg_scope = unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlock(
+ DIB(cx),
+ parent_dbg_scope,
+ file_metadata,
+ loc.line,
+ loc.col,
+ )
};
let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
@@ -120,7 +124,7 @@ fn make_mir_scope<'ll, 'tcx>(
dbg_scope,
inlined_at: inlined_at.or(parent_scope.inlined_at),
file_start_pos: loc.file.start_pos,
- file_end_pos: loc.file.end_pos,
+ file_end_pos: loc.file.end_position(),
};
instantiated.insert(scope);
}
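
Caching callee debug scopes in `inlined_function_scopes` via the `entry`/`or_insert_with` idiom means each distinct inlined callee is materialized once and then reused. A small sketch of the memoization pattern, with made-up callee names and a counter to show how often the expensive step actually runs:

use std::collections::HashMap;

// Hypothetical stand-ins: `callee` is just a name, and "building a scope" is an
// expensive operation we only want to run once per distinct callee.
fn build_scope(callee: &str, built: &mut u32) -> String {
    *built += 1;
    format!("DIScope({callee})")
}

fn main() {
    let mut inlined_function_scopes: HashMap<String, String> = HashMap::new();
    let mut built = 0;

    // The same callee can appear in many inlined scopes; entry/or_insert_with
    // makes the second and later lookups reuse the cached value.
    for callee in ["core::fmt::write", "alloc::vec::Vec::push", "core::fmt::write"] {
        let scope = inlined_function_scopes
            .entry(callee.to_string())
            .or_insert_with(|| build_scope(callee, &mut built));
        println!("{callee} -> {scope}");
    }

    assert_eq!(built, 2); // only two distinct callees were actually built
}
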
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index f8cbcbd5e..ed9387616 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -445,9 +445,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
}
- // Box<T, A> may have a non-ZST allocator A. In that case, we
+ // Box<T, A> may have a non-1-ZST allocator A. In that case, we
// cannot treat Box<T, A> as just an owned alias of `*mut T`.
- ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
+ ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_1zst() => {
build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
}
ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
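
A 1-ZST is a zero-sized type whose alignment is also 1; a zero-sized type with raised alignment still has size 0 but is not a 1-ZST, which is why the Box-with-allocator special case above is narrowed to `is_1zst()`. A quick standalone illustration:

// A zero-sized type with alignment 1 (a "1-ZST") versus a zero-sized type whose
// alignment is raised to 16: both have size 0, but only the first is a 1-ZST.
struct PlainZst;

#[repr(align(16))]
struct AlignedZst;

fn main() {
    use std::mem::{align_of, size_of};
    assert_eq!((size_of::<PlainZst>(), align_of::<PlainZst>()), (0, 1));
    assert_eq!((size_of::<AlignedZst>(), align_of::<AlignedZst>()), (0, 16));
    println!("both zero-sized; only PlainZst is a 1-ZST");
}
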
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index 40714a0af..30cc9ea9b 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -263,11 +263,11 @@ impl CodegenCx<'_, '_> {
pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
Ok(SourceFileAndLine { sf: file, line }) => {
- let line_pos = file.lines(|lines| lines[line]);
+ let line_pos = file.lines()[line];
// Use 1-based indexing.
let line = (line + 1) as u32;
- let col = (pos - line_pos).to_u32() + 1;
+ let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
(file, line, col)
}
@@ -292,7 +292,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: &'ll Value,
mir: &mir::Body<'tcx>,
- ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+ ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
if self.sess().opts.debuginfo == DebugInfo::None {
return None;
}
@@ -304,8 +304,10 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
file_start_pos: BytePos(0),
file_end_pos: BytePos(0),
};
- let mut fn_debug_context =
- FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+ let mut fn_debug_context = FunctionDebugContext {
+ scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+ inlined_function_scopes: Default::default(),
+ };
// Fill in all the scopes, with the information from the MIR body.
compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
@@ -347,6 +349,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
type_names::push_generic_params(
tcx,
tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+ enclosing_fn_def_id,
&mut name,
);
@@ -526,7 +529,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
// If the method does *not* belong to a trait, proceed
if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
- let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+ let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
instance.args,
ty::ParamEnv::reveal_all(),
cx.tcx.type_of(impl_def_id),
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
index fced6d504..665d19579 100644
--- a/compiler/rustc_codegen_llvm/src/errors.rs
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -139,6 +139,10 @@ pub(crate) struct LtoDisallowed;
pub(crate) struct LtoDylib;
#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
#[diag(codegen_llvm_lto_bitcode_from_rlib)]
pub(crate) struct LtoBitcodeFromRlib {
pub llvm_err: String,
@@ -226,3 +230,9 @@ pub(crate) struct WriteBytecode<'a> {
pub(crate) struct CopyBitcode {
pub err: std::io::Error,
}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+ pub algorithm: &'static str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a9b06030e..a97b803fc 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::{sym, symbol::kw, Span, Symbol};
use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
@@ -165,7 +165,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();
- let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
let llty = ty.llvm_type(self);
self.volatile_load(llty, ptr)
} else {
@@ -376,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
}
_ if name.as_str().starts_with("simd_") => {
- match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ match generic_simd_intrinsic(
+ self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+ ) {
Ok(llval) => llval,
Err(()) => return,
}
@@ -386,7 +388,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+ if let PassMode::Cast { .. } = &fn_abi.ret.mode {
self.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
@@ -911,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
name: Symbol,
callee_ty: Ty<'tcx>,
+ fn_args: GenericArgsRef<'tcx>,
args: &[OperandRef<'tcx, &'ll Value>],
ret_ty: Ty<'tcx>,
llret_ty: &'ll Type,
@@ -1030,6 +1033,56 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
));
}
+ if name == sym::simd_shuffle_generic {
+ let idx = fn_args[2]
+ .expect_const()
+ .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+ .unwrap()
+ .unwrap_branch();
+ let n = idx.len() as u64;
+
+ require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+ );
+ require!(
+ in_elem == out_ty,
+ InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+ );
+
+ let total_len = in_len * 2;
+
+ let indices: Option<Vec<_>> = idx
+ .iter()
+ .enumerate()
+ .map(|(arg_idx, val)| {
+ let idx = val.unwrap_leaf().try_to_i32().unwrap();
+ if idx >= i32::try_from(total_len).unwrap() {
+ bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds {
+ span,
+ name,
+ arg_idx: arg_idx as u64,
+ total_len: total_len.into(),
+ });
+ None
+ } else {
+ Some(bx.const_i32(idx))
+ }
+ })
+ .collect();
+ let Some(indices) = indices else {
+ return Ok(bx.const_null(llret_ty));
+ };
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ bx.const_vector(&indices),
+ ));
+ }
+
if name == sym::simd_shuffle {
// Make sure this is actually an array, since typeck only checks the length-suffixed
// version of this intrinsic.
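The new `simd_shuffle_generic` arm above checks every shuffle index against `total_len = in_len * 2`, because each index selects a lane from the virtual concatenation of the two input vectors. A plain-Rust sketch of that bounds rule, with no LLVM or rustc types involved:

// Returns None if any index is out of bounds, mirroring the
// InvalidMonomorphization::ShuffleIndexOutOfBounds error path above.
fn shuffle<const N: usize>(a: [i32; N], b: [i32; N], idx: &[usize]) -> Option<Vec<i32>> {
    let total_len = 2 * N; // lanes of `a` followed by lanes of `b`
    idx.iter()
        .map(|&i| {
            if i >= total_len {
                None
            } else if i < N {
                Some(a[i])
            } else {
                Some(b[i - N])
            }
        })
        .collect()
}

fn main() {
    assert_eq!(shuffle([1, 2], [3, 4], &[0, 3]), Some(vec![1, 4]));
    assert_eq!(shuffle([1, 2], [3, 4], &[4, 0]), None); // 4 >= 2 * 2
}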
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index d283299ac..9c5edd6bd 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -10,6 +10,7 @@
#![feature(iter_intersperse)]
#![feature(let_chains)]
#![feature(never_type)]
+#![feature(slice_group_by)]
#![feature(impl_trait_in_assoc_type)]
#![recursion_limit = "256"]
#![allow(rustc::potential_query_instability)]
@@ -21,6 +22,7 @@ extern crate rustc_macros;
#[macro_use]
extern crate tracing;
+use back::owned_target_machine::OwnedTargetMachine;
use back::write::{create_informational_target_machine, create_target_machine};
use errors::ParseTargetMachineConfig;
@@ -38,8 +40,8 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, Subd
use rustc_fluent_macro::fluent_messages;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
@@ -47,10 +49,12 @@ use rustc_span::symbol::Symbol;
use std::any::Any;
use std::ffi::CStr;
use std::io::Write;
+use std::mem::ManuallyDrop;
mod back {
pub mod archive;
pub mod lto;
+ pub mod owned_target_machine;
mod profiling;
pub mod write;
}
@@ -161,7 +165,7 @@ impl ExtraBackendMethods for LlvmCodegenBackend {
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
- type TargetMachine = &'static mut llvm::TargetMachine;
+ type TargetMachine = OwnedTargetMachine;
type TargetMachineError = crate::errors::LlvmError<'static>;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
@@ -400,7 +404,10 @@ impl CodegenBackend for LlvmCodegenBackend {
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
- tm: &'static mut llvm::TargetMachine,
+
+ // This field is `ManuallyDrop` because it is important that the `TargetMachine`
+ // is disposed prior to the `Context` being disposed, otherwise use-after-frees can occur.
+ // is disposed prior to the `Context` being disposed, otherwise use-after-frees can occur.
+ tm: ManuallyDrop<OwnedTargetMachine>,
}
unsafe impl Send for ModuleLlvm {}
@@ -411,7 +418,11 @@ impl ModuleLlvm {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
- ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
+ ModuleLlvm {
+ llmod_raw,
+ llcx,
+ tm: ManuallyDrop::new(create_target_machine(tcx, mod_name)),
+ }
}
}
@@ -419,7 +430,11 @@ impl ModuleLlvm {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
- ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+ ModuleLlvm {
+ llmod_raw,
+ llcx,
+ tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)),
+ }
}
}
@@ -440,7 +455,7 @@ impl ModuleLlvm {
}
};
- Ok(ModuleLlvm { llmod_raw, llcx, tm })
+ Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
}
}
@@ -452,7 +467,7 @@ impl ModuleLlvm {
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
- llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+ ManuallyDrop::drop(&mut self.tm);
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
}
}
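The `ManuallyDrop` wrapper introduced above exists to pin down destruction order across the FFI boundary: the target machine must be disposed before the LLVM context it was created against. A minimal sketch of the pattern with stand-in types (not the compiler's real `ModuleLlvm`), where the explicit drop in `Drop::drop` guarantees the ordering instead of relying on field declaration order:

use std::mem::ManuallyDrop;

struct Context;
struct TargetMachine;

impl Drop for Context {
    fn drop(&mut self) {
        println!("dispose context");
    }
}
impl Drop for TargetMachine {
    fn drop(&mut self) {
        println!("dispose target machine");
    }
}

#[allow(dead_code)]
struct Module {
    ctx: Context,
    // ManuallyDrop so we can drop it explicitly *before* `ctx` goes away.
    tm: ManuallyDrop<TargetMachine>,
}

impl Drop for Module {
    fn drop(&mut self) {
        // Safety: `tm` is never touched again after this point.
        unsafe { ManuallyDrop::drop(&mut self.tm) };
        // `ctx` is dropped automatically after this body runs.
    }
}

fn main() {
    let _m = Module { ctx: Context, tm: ManuallyDrop::new(TargetMachine) };
    // Prints "dispose target machine" and then "dispose context".
}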
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 84157d1e2..a038b3af0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -83,12 +83,17 @@ pub enum LLVMModFlagBehavior {
// Consts for the LLVM CallConv type, pre-cast to usize.
/// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum CallConv {
CCallConv = 0,
FastCallConv = 8,
ColdCallConv = 9,
+ PreserveMost = 14,
+ PreserveAll = 15,
+ Tail = 18,
X86StdcallCallConv = 64,
X86FastcallCallConv = 65,
ArmAapcsCallConv = 67,
@@ -2107,6 +2112,8 @@ extern "C" {
);
pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+ // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
pub fn LLVMRustCreateTargetMachine(
Triple: *const c_char,
CPU: *const c_char,
@@ -2126,9 +2133,14 @@ extern "C" {
RelaxELFRelocations: bool,
UseInitArray: bool,
SplitDwarfFile: *const c_char,
+ OutputObjFile: *const c_char,
+ DebugInfoCompression: *const c_char,
ForceEmulatedTls: bool,
- ) -> Option<&'static mut TargetMachine>;
- pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+ ArgsCstrBuff: *const c_char,
+ ArgsCstrBuffLen: usize,
+ ) -> *mut TargetMachine;
+
+ pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
pub fn LLVMRustAddLibraryInfo<'a>(
PM: &PassManager<'a>,
M: &'a Module,
@@ -2314,6 +2326,12 @@ extern "C" {
len: usize,
out_len: &mut usize,
) -> *const u8;
+ pub fn LLVMRustGetSliceFromObjectDataByName(
+ data: *const u8,
+ len: usize,
+ name: *const u8,
+ out_len: &mut usize,
+ ) -> *const u8;
pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
pub fn LLVMRustLinkerAdd(
@@ -2352,6 +2370,10 @@ extern "C" {
pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
+ pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+ pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
pub fn LLVMRustGetSymbols(
buf_ptr: *const u8,
buf_len: usize,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index a76c9c9b7..7c8ef67ff 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -303,7 +303,7 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
// check that all features in a given smallvec are enabled
for llvm_feature in to_llvm_features(sess, feature) {
let cstr = SmallCStr::new(llvm_feature);
- if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+ if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
return false;
}
}
@@ -422,14 +422,14 @@ pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess
}
unsafe {
llvm::LLVMRustPrintTargetCPUs(
- tm,
+ &tm,
cpu_cstring.as_ptr(),
callback,
&mut out as *mut &mut dyn PrintBackendInfo as *mut c_void,
);
}
}
- PrintKind::TargetFeatures => print_target_features(out, sess, tm),
+ PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
}
}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 831645579..dcc62d314 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -3,7 +3,7 @@ use crate::context::TypeLowering;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
use rustc_target::abi::HasDataLayout;
@@ -215,20 +215,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
if let Abi::Scalar(scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
return llty;
}
- let llty = match *self.ty.kind() {
- ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
- ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
- ty::FnPtr(sig) => {
- cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
- }
- _ => self.scalar_llvm_type_at(cx, scalar),
- };
+ let llty = self.scalar_llvm_type_at(cx, scalar);
cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
return llty;
}
@@ -303,27 +299,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
index: usize,
immediate: bool,
) -> &'a Type {
- // HACK(eddyb) special-case fat pointers until LLVM removes
- // pointee types, to avoid bitcasting every `OperandRef::deref`.
- match *self.ty.kind() {
- ty::Ref(..) | ty::RawPtr(_) => {
- return self.field(cx, index).llvm_type(cx);
- }
- // only wide pointer boxes are handled as pointers
- // thin pointer boxes with scalar allocators are handled by the general logic below
- ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
- let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
- return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
- }
- // `dyn* Trait` has the same ABI as `*mut dyn Trait`
- ty::Dynamic(bounds, region, ty::DynStar) => {
- let ptr_ty =
- Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
- return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
- }
- _ => {}
- }
-
+ // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+ // In other words, this should generally not look at the type at all, but only at the
+ // layout.
let Abi::ScalarPair(a, b) = self.abi else {
bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
};
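The comments added above ask codegen to derive the LLVM type from the layout alone because a `repr(transparent)` wrapper is guaranteed to have the same layout as its single non-1-ZST field, so both must lower to the same LLVM type (here a `&str`, i.e. a scalar pair of pointer and length). A tiny check of that guarantee:

use std::mem::{align_of, size_of};

#[allow(dead_code)]
#[repr(transparent)]
struct Wrapper(&'static str); // transparently wraps a (pointer, length) scalar pair

fn main() {
    assert_eq!(size_of::<Wrapper>(), size_of::<&'static str>());
    assert_eq!(align_of::<Wrapper>(), align_of::<&'static str>());
}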
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index b6c70c622..6f7d7482a 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -19,10 +19,10 @@ codegen_ssa_copy_path_buf = unable to copy {$source_file} to {$output_path}: {$e
codegen_ssa_create_temp_dir = couldn't create a temp dir: {$error}
-codegen_ssa_erroneous_constant = erroneous constant encountered
-
codegen_ssa_error_creating_remark_dir = failed to create remark directory: {$error}
+codegen_ssa_expected_coverage_symbol = expected `coverage(off)` or `coverage(on)`
+
codegen_ssa_expected_used_symbol = expected `used`, `used(compiler)` or `used(linker)`
codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
@@ -35,6 +35,8 @@ codegen_ssa_extract_bundled_libs_parse_archive = failed to parse archive '{$rlib
codegen_ssa_extract_bundled_libs_read_entry = failed to read entry '{$rlib}': {$error}
codegen_ssa_extract_bundled_libs_write_file = failed to write file '{$rlib}': {$error}
+codegen_ssa_failed_to_get_layout = failed to get layout for {$ty}: {$err}
+
codegen_ssa_failed_to_write = failed to write {$path}: {$error}
codegen_ssa_ignoring_emit_path = ignoring emit path because multiple .{$extension} files were produced
@@ -44,8 +46,6 @@ codegen_ssa_ignoring_output = ignoring -o because multiple .{$extension} files w
codegen_ssa_illegal_link_ordinal_format = illegal ordinal format in `link_ordinal`
.note = an unsuffixed integer value, e.g., `1`, is expected
-codegen_ssa_incompatible_linking_modifiers = link modifiers combination `+bundle,+whole-archive` is unstable when generating rlibs
-
codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient.
codegen_ssa_invalid_link_ordinal_nargs = incorrect number of arguments to `#[link_ordinal]`
@@ -170,8 +170,6 @@ codegen_ssa_no_natvis_directory = error enumerating natvis directory: {$error}
codegen_ssa_option_gcc_only = option `-Z gcc-ld` is used even though linker flavor is not gcc
-codegen_ssa_polymorphic_constant_too_generic = codegen encountered polymorphic constant: TooGeneric
-
codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status}
.note = {$output}
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
index a7ac728c5..c4a0f6291 100644
--- a/compiler/rustc_codegen_ssa/src/back/link.rs
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -365,15 +365,9 @@ fn link_rlib<'a>(
// loaded from the libraries found here and then encode that into the
// metadata of the rlib we're generating somehow.
for lib in codegen_results.crate_info.used_libraries.iter() {
- let NativeLibKind::Static { bundle: None | Some(true), whole_archive } = lib.kind else {
+ let NativeLibKind::Static { bundle: None | Some(true), .. } = lib.kind else {
continue;
};
- if whole_archive == Some(true)
- && flavor == RlibFlavor::Normal
- && !codegen_results.crate_info.feature_packed_bundled_libs
- {
- sess.emit_err(errors::IncompatibleLinkingModifiers);
- }
if flavor == RlibFlavor::Normal && let Some(filename) = lib.filename {
let path = find_native_static_library(filename.as_str(), true, &lib_search_paths, sess);
let src = read(path).map_err(|e| sess.emit_fatal(errors::ReadFileError {message: e }))?;
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
index 4c8547407..c6f4bd35e 100644
--- a/compiler/rustc_codegen_ssa/src/back/metadata.rs
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -226,9 +226,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
let mut file = write::Object::new(binary_format, architecture, endianness);
if sess.target.is_like_osx {
- if let Some(build_version) = macho_object_build_version_for_target(&sess.target) {
- file.set_macho_build_version(build_version)
- }
+ file.set_macho_build_version(macho_object_build_version_for_target(&sess.target))
}
let e_flags = match architecture {
Architecture::Mips => {
@@ -334,31 +332,28 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static
Some(file)
}
-/// Apple's LD, when linking for Mac Catalyst, requires object files to
-/// contain information about what they were built for (LC_BUILD_VERSION):
-/// the platform (macOS/watchOS etc), minimum OS version, and SDK version.
-/// This returns a `MachOBuildVersion` if necessary for the target.
-fn macho_object_build_version_for_target(
- target: &Target,
-) -> Option<object::write::MachOBuildVersion> {
- if !target.llvm_target.ends_with("-macabi") {
- return None;
- }
+/// Since Xcode 15 Apple's LD requires object files to contain information about what they were
+/// built for (LC_BUILD_VERSION): the platform (macOS/watchOS etc), minimum OS version, and SDK
+/// version. This returns a `MachOBuildVersion` for the target.
+fn macho_object_build_version_for_target(target: &Target) -> object::write::MachOBuildVersion {
/// The `object` crate demands "X.Y.Z encoded in nibbles as xxxx.yy.zz"
/// e.g. minOS 14.0 = 0x000E0000, or SDK 16.2 = 0x00100200
fn pack_version((major, minor): (u32, u32)) -> u32 {
(major << 16) | (minor << 8)
}
- let platform = object::macho::PLATFORM_MACCATALYST;
- let min_os = (14, 0);
- let sdk = (16, 2);
+ let platform =
+ rustc_target::spec::current_apple_platform(target).expect("unknown Apple target OS");
+ let min_os = rustc_target::spec::current_apple_deployment_target(target)
+ .expect("unknown Apple target OS");
+ let sdk =
+ rustc_target::spec::current_apple_sdk_version(platform).expect("unknown Apple target OS");
let mut build_version = object::write::MachOBuildVersion::default();
build_version.platform = platform;
build_version.minos = pack_version(min_os);
build_version.sdk = pack_version(sdk);
- Some(build_version)
+ build_version
}
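A worked example of the `pack_version` encoding used above ("X.Y.Z encoded in nibbles as xxxx.yy.zz"), reproducing the two values quoted in its doc comment:

fn pack_version((major, minor): (u32, u32)) -> u32 {
    (major << 16) | (minor << 8)
}

fn main() {
    assert_eq!(pack_version((14, 0)), 0x000E_0000); // minOS 14.0
    assert_eq!(pack_version((16, 2)), 0x0010_0200); // SDK 16.2
}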
pub enum MetadataPosition {
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
index 8fb2ccb7e..9cd439410 100644
--- a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -11,10 +11,10 @@ use rustc_middle::middle::exported_symbols::{
metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
};
use rustc_middle::query::LocalCrate;
-use rustc_middle::query::{ExternProviders, Providers};
use rustc_middle::ty::Instance;
use rustc_middle::ty::{self, SymbolName, TyCtxt};
use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
+use rustc_middle::util::Providers;
use rustc_session::config::{CrateType, OomStrategy};
use rustc_target::spec::SanitizerSet;
@@ -334,7 +334,7 @@ fn exported_symbols_provider_local(
match *mono_item {
MonoItem::Fn(Instance { def: InstanceDef::Item(def), args }) => {
- if args.non_erasable_generics().next().is_some() {
+ if args.non_erasable_generics(tcx, def).next().is_some() {
let symbol = ExportedSymbol::Generic(def, args);
symbols.push((
symbol,
@@ -346,10 +346,10 @@ fn exported_symbols_provider_local(
));
}
}
- MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), args }) => {
+ MonoItem::Fn(Instance { def: InstanceDef::DropGlue(def_id, Some(ty)), args }) => {
// A little sanity-check
debug_assert_eq!(
- args.non_erasable_generics().next(),
+ args.non_erasable_generics(tcx, def_id).next(),
Some(GenericArgKind::Type(ty))
);
symbols.push((
@@ -457,11 +457,9 @@ pub fn provide(providers: &mut Providers) {
providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
providers.upstream_drop_glue_for = upstream_drop_glue_for_provider;
providers.wasm_import_module_map = wasm_import_module_map;
-}
-
-pub fn provide_extern(providers: &mut ExternProviders) {
- providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
- providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+ providers.extern_queries.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+ providers.extern_queries.upstream_monomorphizations_for =
+ upstream_monomorphizations_for_provider;
}
fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel {
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index f485af00b..f192747c8 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -286,6 +286,10 @@ pub struct TargetMachineFactoryConfig {
/// so the path to the dwarf object has to be provided when we create the target machine.
/// This can be ignored by backends which do not need it for their Split DWARF support.
pub split_dwarf_file: Option<PathBuf>,
+
+ /// The name of the output object file. Used for setting OutputFilenames in target options
+ /// so that LLVM can emit the CodeView S_OBJNAME record in PDB files.
+ pub output_obj_file: Option<PathBuf>,
}
impl TargetMachineFactoryConfig {
@@ -302,7 +306,10 @@ impl TargetMachineFactoryConfig {
} else {
None
};
- TargetMachineFactoryConfig { split_dwarf_file }
+
+ let output_obj_file =
+ Some(cgcx.output_filenames.temp_path(OutputType::Object, Some(module_name)));
+ TargetMachineFactoryConfig { split_dwarf_file, output_obj_file }
}
}
@@ -343,6 +350,12 @@ pub struct CodegenContext<B: WriteBackendMethods> {
pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
+ /// All command-line args used to invoke the compiler, with @file args fully expanded.
+ /// This will only be used within debug info, e.g. in the PDB file on Windows.
+ /// This is mainly useful for other tools that read that debuginfo to figure out
+ /// how to call the compiler with the same arguments.
+ pub expanded_args: Vec<String>,
+
/// Handler to use for diagnostics produced during codegen.
pub diag_emitter: SharedEmitter,
/// LLVM optimizations for which we want to print remarks.
@@ -1108,6 +1121,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
coordinator_send,
+ expanded_args: tcx.sess.expanded_args.clone(),
diag_emitter: shared_emitter.clone(),
output_filenames: tcx.output_filenames(()).clone(),
regular_module_config: regular_config,
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index aa003e4e8..1e4ea73a1 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -181,7 +181,7 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
old_info
}
}
- (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
+ (_, ty::Dynamic(data, _, _)) => meth::get_vtable(cx, source, data.principal()),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
@@ -202,7 +202,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(src, unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
- assert_eq!(def_a, def_b);
+ assert_eq!(def_a, def_b); // implies same number of fields
let src_layout = bx.cx().layout_of(src_ty);
let dst_layout = bx.cx().layout_of(dst_ty);
if src_ty == dst_ty {
@@ -211,7 +211,8 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(bx.cx(), i);
- if src_f.is_zst() {
+ if src_f.is_1zst() {
+ // We are looking for the one non-1-ZST field; this is not it.
continue;
}
@@ -272,13 +273,14 @@ pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
- assert_eq!(def_a, def_b);
+ assert_eq!(def_a, def_b); // implies same number of fields
for i in def_a.variant(FIRST_VARIANT).fields.indices() {
let src_f = src.project_field(bx, i.as_usize());
let dst_f = dst.project_field(bx, i.as_usize());
if dst_f.layout.is_zst() {
+ // No data here, nothing to copy/coerce.
continue;
}
@@ -418,9 +420,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
rust_main_def_id: DefId,
entry_type: EntryFnType,
) -> Bx::Function {
- // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
- // depending on whether the target needs `argc` and `argv` to be passed in.
- let llfty = if cx.sess().target.main_needs_argc_argv {
+ // The entry function is either `int main(void)` or `int main(int argc, char **argv)`, or
+ // `usize efi_main(void *handle, void *system_table)` depending on the target.
+ let llfty = if cx.sess().target.os.contains("uefi") {
+ cx.type_func(&[cx.type_ptr(), cx.type_ptr()], cx.type_isize())
+ } else if cx.sess().target.main_needs_argc_argv {
cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
@@ -483,8 +487,12 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
};
let result = bx.call(start_ty, None, None, start_fn, &args, None);
- let cast = bx.intcast(result, cx.type_int(), true);
- bx.ret(cast);
+ if cx.sess().target.os.contains("uefi") {
+ bx.ret(result);
+ } else {
+ let cast = bx.intcast(result, cx.type_int(), true);
+ bx.ret(cast);
+ }
llfn
}
@@ -495,7 +503,17 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
- if cx.sess().target.main_needs_argc_argv {
+ if cx.sess().target.os.contains("uefi") {
+ // Params for UEFI
+ let param_handle = bx.get_param(0);
+ let param_system_table = bx.get_param(1);
+ let arg_argc = bx.const_int(cx.type_isize(), 2);
+ let arg_argv = bx.alloca(cx.type_array(cx.type_ptr(), 2), Align::ONE);
+ bx.store(param_handle, arg_argv, Align::ONE);
+ let arg_argv_el1 = bx.gep(cx.type_ptr(), arg_argv, &[bx.const_int(cx.type_int(), 1)]);
+ bx.store(param_system_table, arg_argv_el1, Align::ONE);
+ (arg_argc, arg_argv)
+ } else if cx.sess().target.main_needs_argc_argv {
// Params from native `main()` used as args for rust start function
let param_argc = bx.get_param(0);
let param_argv = bx.get_param(1);
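The entry-wrapper changes above add a third shape, `usize efi_main(void *handle, void *system_table)`, chosen when the target OS contains "uefi"; the existing `main` variants are kept otherwise. A backend-free restatement of that selection, with `TargetInfo` as a made-up stand-in for the session's target:

struct TargetInfo {
    os: &'static str,
    main_needs_argc_argv: bool,
}

fn entry_signature(t: &TargetInfo) -> &'static str {
    if t.os.contains("uefi") {
        "usize efi_main(void *handle, void *system_table)"
    } else if t.main_needs_argc_argv {
        "int main(int argc, char **argv)"
    } else {
        "int main(void)"
    }
}

fn main() {
    let uefi = TargetInfo { os: "uefi", main_needs_argc_argv: false };
    assert!(entry_signature(&uefi).starts_with("usize efi_main"));
    let hosted = TargetInfo { os: "linux", main_needs_argc_argv: true };
    assert_eq!(entry_signature(&hosted), "int main(int argc, char **argv)");
}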
@@ -839,7 +857,6 @@ impl CrateInfo {
dependency_formats: tcx.dependency_formats(()).clone(),
windows_subsystem,
natvis_debugger_visualizers: Default::default(),
- feature_packed_bundled_libs: tcx.features().packed_bundled_libs,
};
let crates = tcx.crates(());
diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
index f6936c80b..59efe4cd3 100644
--- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
+++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs
@@ -16,7 +16,10 @@ use rustc_target::spec::{abi, SanitizerSet};
use crate::errors;
use crate::target_features::from_target_feature;
-use crate::{errors::ExpectedUsedSymbol, target_features::check_target_feature_trait_unsafe};
+use crate::{
+ errors::{ExpectedCoverageSymbol, ExpectedUsedSymbol},
+ target_features::check_target_feature_trait_unsafe,
+};
fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
use rustc_middle::mir::mono::Linkage::*;
@@ -128,7 +131,21 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs {
.emit();
}
}
- sym::no_coverage => codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE,
+ sym::coverage => {
+ let inner = attr.meta_item_list();
+ match inner.as_deref() {
+ Some([item]) if item.has_name(sym::off) => {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
+ }
+ Some([item]) if item.has_name(sym::on) => {
+ // Allow #[coverage(on)] for explicitness, and maybe in the future to enable
+ // coverage on a smaller scope within an excluded larger scope.
+ }
+ Some(_) | None => {
+ tcx.sess.emit_err(ExpectedCoverageSymbol { span: attr.span });
+ }
+ }
+ }
sym::rustc_std_internal_symbol => {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL
}
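A usage sketch for the attribute handled above. The attribute was still unstable at the time of this change, so this only compiles on a nightly toolchain; the `coverage_attribute` feature-gate name is an assumption here:

#![feature(coverage_attribute)] // assumed gate name; nightly only

#[coverage(off)] // maps to CodegenFnAttrFlags::NO_COVERAGE above
fn not_instrumented() -> u32 {
    42
}

#[coverage(on)] // accepted for explicitness; currently a no-op
fn explicitly_instrumented() -> u32 {
    7
}

fn main() {
    assert_eq!(not_instrumented() + explicitly_instrumented(), 49);
}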
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
index 5a6807599..641ac3eb8 100644
--- a/compiler/rustc_codegen_ssa/src/common.rs
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -1,7 +1,7 @@
#![allow(non_camel_case_types)]
use rustc_hir::LangItem;
-use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::mir;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
use rustc_span::Span;
@@ -194,10 +194,10 @@ pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub fn asm_const_to_str<'tcx>(
tcx: TyCtxt<'tcx>,
sp: Span,
- const_value: ConstValue<'tcx>,
+ const_value: mir::ConstValue<'tcx>,
ty_and_layout: TyAndLayout<'tcx>,
) -> String {
- let ConstValue::Scalar(scalar) = const_value else {
+ let mir::ConstValue::Scalar(scalar) = const_value else {
span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
};
let value = scalar.assert_bits(ty_and_layout.size);
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
index 067c824ab..989df448a 100644
--- a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -106,14 +106,14 @@ fn push_debuginfo_type_name<'tcx>(
ty_and_layout,
&|output, visited| {
push_item_name(tcx, def.did(), true, output);
- push_generic_params_internal(tcx, args, output, visited);
+ push_generic_params_internal(tcx, args, def.did(), output, visited);
},
output,
visited,
);
} else {
push_item_name(tcx, def.did(), qualified, output);
- push_generic_params_internal(tcx, args, output, visited);
+ push_generic_params_internal(tcx, args, def.did(), output, visited);
}
}
ty::Tuple(component_types) => {
@@ -237,8 +237,13 @@ fn push_debuginfo_type_name<'tcx>(
let principal =
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
push_item_name(tcx, principal.def_id, qualified, output);
- let principal_has_generic_params =
- push_generic_params_internal(tcx, principal.args, output, visited);
+ let principal_has_generic_params = push_generic_params_internal(
+ tcx,
+ principal.args,
+ principal.def_id,
+ output,
+ visited,
+ );
let projection_bounds: SmallVec<[_; 4]> = trait_data
.projection_bounds()
@@ -421,7 +426,6 @@ fn push_debuginfo_type_name<'tcx>(
| ty::Placeholder(..)
| ty::Alias(..)
| ty::Bound(..)
- | ty::GeneratorWitnessMIR(..)
| ty::GeneratorWitness(..) => {
bug!(
"debuginfo: Trying to create type name for \
@@ -516,7 +520,13 @@ pub fn compute_debuginfo_vtable_name<'tcx>(
tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
visited.clear();
- push_generic_params_internal(tcx, trait_ref.args, &mut vtable_name, &mut visited);
+ push_generic_params_internal(
+ tcx,
+ trait_ref.args,
+ trait_ref.def_id,
+ &mut vtable_name,
+ &mut visited,
+ );
} else {
vtable_name.push('_');
}
@@ -610,20 +620,20 @@ fn push_unqualified_item_name(
fn push_generic_params_internal<'tcx>(
tcx: TyCtxt<'tcx>,
args: GenericArgsRef<'tcx>,
+ def_id: DefId,
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) -> bool {
- if args.non_erasable_generics().next().is_none() {
+ debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
+ let mut args = args.non_erasable_generics(tcx, def_id).peekable();
+ if args.peek().is_none() {
return false;
}
-
- debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
-
let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
output.push('<');
- for type_parameter in args.non_erasable_generics() {
+ for type_parameter in args {
match type_parameter {
GenericArgKind::Type(type_parameter) => {
push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
@@ -649,12 +659,12 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
}
_ => match ct.ty().kind() {
ty::Int(ity) => {
- let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all());
let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
write!(output, "{val}")
}
ty::Uint(_) => {
- let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all());
write!(output, "{val}")
}
ty::Bool => {
@@ -670,10 +680,8 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
// avoiding collisions and will make the emitted type names shorter.
let hash_short = tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
- let ct = ct.eval(tcx, ty::ParamEnv::reveal_all());
- hcx.while_hashing_spans(false, |hcx| {
- ct.to_valtree().hash_stable(hcx, &mut hasher)
- });
+ let ct = ct.eval(tcx, ty::ParamEnv::reveal_all(), None).unwrap();
+ hcx.while_hashing_spans(false, |hcx| ct.hash_stable(hcx, &mut hasher));
hasher.finish::<Hash64>()
});
@@ -691,11 +699,12 @@ fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut S
pub fn push_generic_params<'tcx>(
tcx: TyCtxt<'tcx>,
args: GenericArgsRef<'tcx>,
+ def_id: DefId,
output: &mut String,
) {
let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
let mut visited = FxHashSet::default();
- push_generic_params_internal(tcx, args, output, &mut visited);
+ push_generic_params_internal(tcx, args, def_id, output, &mut visited);
}
fn push_closure_or_generator_name<'tcx>(
@@ -738,7 +747,7 @@ fn push_closure_or_generator_name<'tcx>(
// Truncate the args to the length of the above generics. This will cut off
// anything closure- or generator-specific.
let args = args.truncate_to(tcx, generics);
- push_generic_params_internal(tcx, args, output, visited);
+ push_generic_params_internal(tcx, args, enclosing_fn_def_id, output, visited);
}
fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
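The restructuring of `push_generic_params_internal` above replaces "call `.next()` on one iterator, then build the iterator again for the loop" with a single peekable iterator that is first checked for emptiness and then consumed. A small generic illustration of that shape, unrelated to rustc's actual generics printing:

fn render_params<I: Iterator<Item = u32>>(params: I) -> String {
    let mut params = params.peekable();
    if params.peek().is_none() {
        return String::new(); // nothing to print, no angle brackets
    }
    let mut out = String::from("<");
    for (i, p) in params.enumerate() {
        if i > 0 {
            out.push_str(", ");
        }
        out.push_str(&p.to_string());
    }
    out.push('>');
    out
}

fn main() {
    assert_eq!(render_params(std::iter::empty::<u32>()), "");
    assert_eq!(render_params(1u32..=2), "<1, 2>");
}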
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index b7d8b9b45..14311ec08 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -7,6 +7,7 @@ use rustc_errors::{
IntoDiagnosticArg,
};
use rustc_macros::Diagnostic;
+use rustc_middle::ty::layout::LayoutError;
use rustc_middle::ty::Ty;
use rustc_span::{Span, Symbol};
use rustc_type_ir::FloatTy;
@@ -107,10 +108,6 @@ pub struct CreateTempDir {
}
#[derive(Diagnostic)]
-#[diag(codegen_ssa_incompatible_linking_modifiers)]
-pub struct IncompatibleLinkingModifiers;
-
-#[derive(Diagnostic)]
#[diag(codegen_ssa_add_native_library)]
pub struct AddNativeLibrary {
pub library_path: PathBuf,
@@ -561,6 +558,13 @@ pub struct UnknownArchiveKind<'a> {
}
#[derive(Diagnostic)]
+#[diag(codegen_ssa_expected_coverage_symbol)]
+pub struct ExpectedCoverageSymbol {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(codegen_ssa_expected_used_symbol)]
pub struct ExpectedUsedSymbol {
#[primary_span]
@@ -588,20 +592,6 @@ pub struct InvalidWindowsSubsystem {
}
#[derive(Diagnostic)]
-#[diag(codegen_ssa_erroneous_constant)]
-pub struct ErroneousConstant {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(Diagnostic)]
-#[diag(codegen_ssa_polymorphic_constant_too_generic)]
-pub struct PolymorphicConstantTooGeneric {
- #[primary_span]
- pub span: Span,
-}
-
-#[derive(Diagnostic)]
#[diag(codegen_ssa_shuffle_indices_evaluation)]
pub struct ShuffleIndicesEvaluation {
#[primary_span]
@@ -1031,6 +1021,15 @@ pub struct TargetFeatureSafeTrait {
}
#[derive(Diagnostic)]
+#[diag(codegen_ssa_failed_to_get_layout)]
+pub struct FailedToGetLayout<'tcx> {
+ #[primary_span]
+ pub span: Span,
+ pub ty: Ty<'tcx>,
+ pub err: LayoutError<'tcx>,
+}
+
+#[derive(Diagnostic)]
#[diag(codegen_ssa_error_creating_remark_dir)]
pub struct ErrorCreatingRemarkDir {
pub error: std::io::Error,
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
index 7bed3fa61..f6186a290 100644
--- a/compiler/rustc_codegen_ssa/src/lib.rs
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -31,7 +31,7 @@ use rustc_middle::dep_graph::WorkProduct;
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
use rustc_middle::middle::dependency_format::Dependencies;
use rustc_middle::middle::exported_symbols::SymbolExportKind;
-use rustc_middle::query::{ExternProviders, Providers};
+use rustc_middle::util::Providers;
use rustc_serialize::opaque::{FileEncoder, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_session::config::{CrateType, OutputFilenames, OutputType, RUST_CGU_EXT};
@@ -164,7 +164,6 @@ pub struct CrateInfo {
pub dependency_formats: Lrc<Dependencies>,
pub windows_subsystem: Option<String>,
pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>,
- pub feature_packed_bundled_libs: bool, // unstable feature flag.
}
#[derive(Encodable, Decodable)]
@@ -190,10 +189,6 @@ pub fn provide(providers: &mut Providers) {
crate::codegen_attrs::provide(providers);
}
-pub fn provide_extern(providers: &mut ExternProviders) {
- crate::back::symbol_export::provide_extern(providers);
-}
-
/// Checks if the given filename ends with the `.rcgu.o` extension that `rustc`
/// uses for the object files it generates.
pub fn looks_like_rust_object_file(filename: &str) -> bool {
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 22c1f0597..d9419dbc9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -234,7 +234,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
| PlaceContext::NonMutatingUse(
NonMutatingUseContext::Inspect
| NonMutatingUseContext::SharedBorrow
- | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::FakeBorrow
| NonMutatingUseContext::AddressOf
| NonMutatingUseContext::Projection,
) => {
@@ -284,8 +284,8 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
for (bb, data) in mir.basic_blocks.iter_enumerated() {
match data.terminator().kind {
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::GeneratorDrop
| TerminatorKind::Unreachable
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 4f26383ed..bd0707edf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -12,7 +12,7 @@ use crate::MemFlags;
use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::{self, AssertKind, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets, UnwindTerminateReason};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty};
@@ -178,7 +178,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
mir::UnwindAction::Continue => None,
mir::UnwindAction::Unreachable => None,
- mir::UnwindAction::Terminate => {
+ mir::UnwindAction::Terminate(reason) => {
if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
// MSVC SEH will abort automatically if an exception tries to
// propagate out from cleanup.
@@ -191,7 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
None
} else {
- Some(fx.terminate_block())
+ Some(fx.terminate_block(reason))
}
}
};
@@ -264,7 +264,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
) -> MergingSucc {
let unwind_target = match unwind {
mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
- mir::UnwindAction::Terminate => Some(fx.terminate_block()),
+ mir::UnwindAction::Terminate(reason) => Some(fx.terminate_block(reason)),
mir::UnwindAction::Continue => None,
mir::UnwindAction::Unreachable => None,
};
@@ -416,7 +416,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- PassMode::Cast(cast_ty, _) => {
+ PassMode::Cast { cast: cast_ty, pad_i32: _ } => {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(op) => op,
LocalRef::PendingOperand => bug!("use of return before def"),
@@ -649,12 +649,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
helper: TerminatorCodegenHelper<'tcx>,
bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
+ reason: UnwindTerminateReason,
) {
let span = terminator.source_info.span;
self.set_debug_loc(bx, terminator.source_info);
// Obtain the panic entry point.
- let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind);
+ let (fn_abi, llfn) = common::build_langcall(bx, Some(span), reason.lang_item());
// Codegen the actual panic invoke/call.
let merging_succ = helper.do_call(
@@ -927,21 +928,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// we get a value of a built-in pointer type.
//
// This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_ref()
- {
- for i in 0..op.layout.fields.count() {
- let field = op.extract_field(bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes;
- }
- }
-
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+ let (idx, _) = op.layout.non_1zst_field(bx).expect(
+ "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+ );
+ op = op.extract_field(bx, idx);
}
// now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
@@ -969,21 +960,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
Immediate(_) => {
// See comment above explaining why we peel these newtypes
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_ref()
- {
- for i in 0..op.layout.fields.count() {
- let field = op.extract_field(bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes;
- }
- }
-
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+ let (idx, _) = op.layout.non_1zst_field(bx).expect(
+ "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+ );
+ op = op.extract_field(bx, idx);
}
// Make sure that we've actually unwrapped the rcvr down
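Both rewritten loops above lean on the new `non_1zst_field` helper: keep descending into the single interesting (non-1-ZST) field of the `DispatchFromDyn` receiver until a pointer-like layout is reached, instead of scanning all fields for "the one non-zero-sized field". A backend-free model of that peeling, where `Layout` is a toy stand-in rather than rustc's `TyAndLayout`:

enum Layout<'a> {
    Pointer,                 // e.g. `&dyn Trait` or `*mut dyn Trait`
    Newtype(&'a Layout<'a>), // a wrapper with exactly one non-1-ZST field
}

fn peel<'a>(mut l: &'a Layout<'a>) -> &'a Layout<'a> {
    while let Layout::Newtype(inner) = *l {
        l = inner; // "extract_field" on the one interesting field
    }
    l
}

fn main() {
    // Something like `Pin<&mut dyn Trait>` modelled as one newtype layer over a pointer.
    let pointer = Layout::Pointer;
    let receiver = Layout::Newtype(&pointer);
    assert!(matches!(peel(&receiver), Layout::Pointer));
}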
@@ -1107,9 +1088,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
}
mir::InlineAsmOperand::Const { ref value } => {
- let const_value = self
- .eval_mir_constant(value)
- .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+ let const_value = self.eval_mir_constant(value);
let string = common::asm_const_to_str(
bx.tcx(),
span,
@@ -1119,8 +1098,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
InlineAsmOperandRef::Const { string }
}
mir::InlineAsmOperand::SymFn { ref value } => {
- let literal = self.monomorphize(value.literal);
- if let ty::FnDef(def_id, args) = *literal.ty().kind() {
+ let const_ = self.monomorphize(value.const_);
+ if let ty::FnDef(def_id, args) = *const_.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
@@ -1224,13 +1203,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.set_debug_loc(bx, terminator.source_info);
match terminator.kind {
- mir::TerminatorKind::Resume => {
+ mir::TerminatorKind::UnwindResume => {
self.codegen_resume_terminator(helper, bx);
MergingSucc::False
}
- mir::TerminatorKind::Terminate => {
- self.codegen_terminate_terminator(helper, bx, terminator);
+ mir::TerminatorKind::UnwindTerminate(reason) => {
+ self.codegen_terminate_terminator(helper, bx, terminator, reason);
MergingSucc::False
}
@@ -1329,7 +1308,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) {
match arg.mode {
PassMode::Ignore => return,
- PassMode::Cast(_, true) => {
+ PassMode::Cast { pad_i32: true, .. } => {
// Fill padding with undef value, where applicable.
llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
}
@@ -1341,7 +1320,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
_ => bug!("codegen_argument: {:?} invalid for pair argument", op),
},
- PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
+ PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => match op.val {
Ref(a, Some(b), _) => {
llargs.push(a);
llargs.push(b);
@@ -1366,7 +1345,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
}
- PassMode::Cast(..) => {
+ PassMode::Cast { .. } => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
@@ -1419,7 +1398,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
- if let PassMode::Cast(ty, _) = &arg.mode {
+ if let PassMode::Cast { cast: ty, .. } = &arg.mode {
let llty = bx.cast_backend_type(ty);
llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
} else {
@@ -1579,79 +1558,81 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
})
}
- fn terminate_block(&mut self) -> Bx::BasicBlock {
- self.terminate_block.unwrap_or_else(|| {
- let funclet;
- let llbb;
- let mut bx;
- if base::wants_msvc_seh(self.cx.sess()) {
- // This is a basic block that we're aborting the program for,
- // notably in an `extern` function. These basic blocks are inserted
- // so that we assert that `extern` functions do indeed not panic,
- // and if they do we abort the process.
- //
- // On MSVC these are tricky though (where we're doing funclets). If
- // we were to do a cleanuppad (like below) the normal functions like
- // `longjmp` would trigger the abort logic, terminating the
- // program. Instead we insert the equivalent of `catch(...)` for C++
- // which magically doesn't trigger when `longjmp` files over this
- // frame.
- //
- // Lots more discussion can be found on #48251 but this codegen is
- // modeled after clang's for:
- //
- // try {
- // foo();
- // } catch (...) {
- // bar();
- // }
- //
- // which creates an IR snippet like
- //
- // cs_terminate:
- // %cs = catchswitch within none [%cp_terminate] unwind to caller
- // cp_terminate:
- // %cp = catchpad within %cs [null, i32 64, null]
- // ...
-
- llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
- let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
-
- let mut cs_bx = Bx::build(self.cx, llbb);
- let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
-
- // The "null" here is actually a RTTI type descriptor for the
- // C++ personality function, but `catch (...)` has no type so
- // it's null. The 64 here is actually a bitfield which
- // represents that this is a catch-all block.
- bx = Bx::build(self.cx, cp_llbb);
- let null =
- bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
- let sixty_four = bx.const_i32(64);
- funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
- } else {
- llbb = Bx::append_block(self.cx, self.llfn, "terminate");
- bx = Bx::build(self.cx, llbb);
+ fn terminate_block(&mut self, reason: UnwindTerminateReason) -> Bx::BasicBlock {
+ if let Some((cached_bb, cached_reason)) = self.terminate_block && reason == cached_reason {
+ return cached_bb;
+ }
- let llpersonality = self.cx.eh_personality();
- bx.filter_landing_pad(llpersonality);
+ let funclet;
+ let llbb;
+ let mut bx;
+ if base::wants_msvc_seh(self.cx.sess()) {
+ // This is a basic block that we're aborting the program for,
+ // notably in an `extern` function. These basic blocks are inserted
+ // so that we assert that `extern` functions do indeed not panic,
+ // and if they do we abort the process.
+ //
+ // On MSVC these are tricky though (where we're doing funclets). If
+ // we were to do a cleanuppad (like below) the normal functions like
+ // `longjmp` would trigger the abort logic, terminating the
+ // program. Instead we insert the equivalent of `catch(...)` for C++
+ // which magically doesn't trigger when `longjmp` flies over this
+ // frame.
+ //
+ // Lots more discussion can be found on #48251 but this codegen is
+ // modeled after clang's for:
+ //
+ // try {
+ // foo();
+ // } catch (...) {
+ // bar();
+ // }
+ //
+ // which creates an IR snippet like
+ //
+ // cs_terminate:
+ // %cs = catchswitch within none [%cp_terminate] unwind to caller
+ // cp_terminate:
+ // %cp = catchpad within %cs [null, i32 64, null]
+ // ...
+
+ llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
+ let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
+
+ let mut cs_bx = Bx::build(self.cx, llbb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
+
+ // The "null" here is actually a RTTI type descriptor for the
+ // C++ personality function, but `catch (...)` has no type so
+ // it's null. The 64 here is actually a bitfield which
+ // represents that this is a catch-all block.
+ bx = Bx::build(self.cx, cp_llbb);
+ let null =
+ bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
+ let sixty_four = bx.const_i32(64);
+ funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
+ } else {
+ llbb = Bx::append_block(self.cx, self.llfn, "terminate");
+ bx = Bx::build(self.cx, llbb);
- funclet = None;
- }
+ let llpersonality = self.cx.eh_personality();
+ bx.filter_landing_pad(llpersonality);
- self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+ funclet = None;
+ }
- let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
- let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+ self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
- let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
- bx.do_not_inline(llret);
+ let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, reason.lang_item());
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- bx.unreachable();
+ let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
+ bx.do_not_inline(llret);
- self.terminate_block = Some(llbb);
- llbb
- })
+ bx.unreachable();
+
+ self.terminate_block = Some((llbb, reason));
+ llbb
}
/// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
@@ -1761,7 +1742,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
- let op = if let PassMode::Cast(..) = ret_abi.mode {
+ let op = if let PassMode::Cast { .. } = ret_abi.mode {
let tmp = PlaceRef::alloca(bx, ret_abi.layout);
tmp.storage_live(bx);
bx.store_arg(&ret_abi, llval, tmp);
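The rewrite of `terminate_block` above also changes the cache: instead of a single memoized block, the code now remembers which `UnwindTerminateReason` the cached block was built for and only reuses it when the reason matches. A reduced sketch of that caching shape with stand-in types (strings instead of LLVM basic blocks, enum variant names illustrative):

#[derive(Copy, Clone, PartialEq, Debug)]
enum UnwindTerminateReason {
    Abi,
    InCleanup,
}

struct Fx {
    terminate_block: Option<(String, UnwindTerminateReason)>,
}

impl Fx {
    fn terminate_block(&mut self, reason: UnwindTerminateReason) -> String {
        if let Some((cached, cached_reason)) = &self.terminate_block {
            if *cached_reason == reason {
                return cached.clone(); // cache hit: same reason
            }
        }
        let block = format!("terminate({reason:?})"); // stands in for building the block
        self.terminate_block = Some((block.clone(), reason));
        block
    }
}

fn main() {
    let mut fx = Fx { terminate_block: None };
    let a = fx.terminate_block(UnwindTerminateReason::Abi);
    assert_eq!(a, fx.terminate_block(UnwindTerminateReason::Abi)); // reused
    assert_ne!(a, fx.terminate_block(UnwindTerminateReason::InCleanup)); // rebuilt
}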
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index babcf9bee..fde4e85f9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -2,7 +2,7 @@ use crate::errors;
use crate::mir::operand::OperandRef;
use crate::traits::*;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::Abi;
@@ -13,74 +13,44 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn eval_mir_constant_to_operand(
&self,
bx: &mut Bx,
- constant: &mir::Constant<'tcx>,
- ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
- let val = self.eval_mir_constant(constant)?;
+ constant: &mir::ConstOperand<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let val = self.eval_mir_constant(constant);
let ty = self.monomorphize(constant.ty());
- Ok(OperandRef::from_const(bx, val, ty))
+ OperandRef::from_const(bx, val, ty)
}
- pub fn eval_mir_constant(
- &self,
- constant: &mir::Constant<'tcx>,
- ) -> Result<ConstValue<'tcx>, ErrorHandled> {
- let ct = self.monomorphize(constant.literal);
- let uv = match ct {
- mir::ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Unevaluated(uv) => uv.expand(),
- ty::ConstKind::Value(val) => {
- return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val)));
- }
- err => span_bug!(
- constant.span,
- "encountered bad ConstKind after monomorphizing: {:?}",
- err
- ),
- },
- mir::ConstantKind::Unevaluated(uv, _) => uv,
- mir::ConstantKind::Val(val, _) => return Ok(val),
- };
-
- self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
- match err {
- ErrorHandled::Reported(_) => {
- self.cx.tcx().sess.emit_err(errors::ErroneousConstant { span: constant.span });
- }
- ErrorHandled::TooGeneric => {
- self.cx
- .tcx()
- .sess
- .diagnostic()
- .emit_bug(errors::PolymorphicConstantTooGeneric { span: constant.span });
- }
- }
- err
- })
+ pub fn eval_mir_constant(&self, constant: &mir::ConstOperand<'tcx>) -> mir::ConstValue<'tcx> {
+ self.monomorphize(constant.const_)
+ .eval(self.cx.tcx(), ty::ParamEnv::reveal_all(), Some(constant.span))
+ .expect("erroneous constant not captured by required_consts")
}
/// This is a convenience helper for `simd_shuffle_indices`. It has the precondition
- /// that the given `constant` is an `ConstantKind::Unevaluated` and must be convertible to
+ /// that the given `constant` is a `Const::Unevaluated` and must be convertible to
/// a `ValTree`. If you want a more general version of this, talk to `wg-const-eval` on zulip.
+ ///
+ /// Note that this function is cursed, since usually MIR consts should not be evaluated to valtrees!
pub fn eval_unevaluated_mir_constant_to_valtree(
&self,
- constant: &mir::Constant<'tcx>,
+ constant: &mir::ConstOperand<'tcx>,
) -> Result<Option<ty::ValTree<'tcx>>, ErrorHandled> {
- let uv = match self.monomorphize(constant.literal) {
- mir::ConstantKind::Unevaluated(uv, _) => uv.shrink(),
- mir::ConstantKind::Ty(c) => match c.kind() {
+ let uv = match self.monomorphize(constant.const_) {
+ mir::Const::Unevaluated(uv, _) => uv.shrink(),
+ mir::Const::Ty(c) => match c.kind() {
// A constant that came from a const generic but was then used as an argument to old-style
// simd_shuffle (passing as argument instead of as a generic param).
rustc_type_ir::ConstKind::Value(valtree) => return Ok(Some(valtree)),
other => span_bug!(constant.span, "{other:#?}"),
},
- // We should never encounter `ConstantKind::Val` unless MIR opts (like const prop) evaluate
+ // We should never encounter `Const::Val` unless MIR opts (like const prop) evaluate
// a constant and write that value back into `Operand`s. This could happen, but is unlikely.
// Also: all users of `simd_shuffle` are on unstable and already need to take a lot of care
// around intrinsics. For an issue to happen here, it would require a macro expanding to a
// `simd_shuffle` call without wrapping the constant argument in a `const {}` block, with
// the user passing arbitrary expressions through instead.
// FIXME(oli-obk): replace the magic const generic argument of `simd_shuffle` with a real
- // const generic.
+ // const generic, and get rid of this entire function.
other => span_bug!(constant.span, "{other:#?}"),
};
let uv = self.monomorphize(uv);
@@ -95,7 +65,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn simd_shuffle_indices(
&mut self,
bx: &Bx,
- constant: &mir::Constant<'tcx>,
+ constant: &mir::ConstOperand<'tcx>,
) -> (Bx::Value, Ty<'tcx>) {
let ty = self.monomorphize(constant.ty());
let val = self
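
The hunk above makes `eval_mir_constant` infallible: constant-evaluation failures are surfaced once, up front, and every later lookup may assume success. A minimal standalone sketch of that pattern, under the caveat that all names here (Body, Ctx, post_mono_checks, try_eval) are hypothetical stand-ins and not rustc's API:

    // Sketch: evaluate every required constant once up front so later lookups can be
    // infallible. Names and types are illustrative only.
    use std::collections::HashMap;

    struct EvalError(String);

    struct Body {
        required_consts: Vec<&'static str>,
    }

    struct Ctx {
        evaluated: HashMap<&'static str, i64>,
    }

    impl Ctx {
        // Fallible evaluation, run once per body before any code is generated.
        fn post_mono_checks(&mut self, body: &Body) -> Result<(), EvalError> {
            for name in &body.required_consts {
                let val = try_eval(name).ok_or_else(|| EvalError(format!("bad const {name}")))?;
                self.evaluated.insert(*name, val);
            }
            Ok(())
        }

        // Infallible accessor: anything not caught above is a compiler bug.
        fn eval_const(&self, name: &str) -> i64 {
            *self.evaluated.get(name).expect("erroneous constant not captured by required_consts")
        }
    }

    fn try_eval(name: &str) -> Option<i64> {
        match name {
            "ANSWER" => Some(42),
            _ => None,
        }
    }

    fn main() {
        let body = Body { required_consts: vec!["ANSWER"] };
        let mut ctx = Ctx { evaluated: HashMap::new() };
        if ctx.post_mono_checks(&body).is_err() {
            return; // abort codegen for this body instead of emitting half-built IR
        }
        println!("{}", ctx.eval_const("ANSWER"));
    }
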
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 526c16a59..0dc30d21c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -1,10 +1,12 @@
use crate::traits::*;
+use rustc_data_structures::fx::FxHashMap;
use rustc_index::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Instance;
use rustc_middle::ty::Ty;
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{kw, Symbol};
@@ -17,10 +19,13 @@ use super::{FunctionCx, LocalRef};
use std::ops::Range;
-pub struct FunctionDebugContext<S, L> {
+pub struct FunctionDebugContext<'tcx, S, L> {
+ /// Maps from source code to the corresponding debug info scope.
pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
-}
+ /// Maps from an inlined function to its debug info declaration.
+ pub inlined_function_scopes: FxHashMap<Instance<'tcx>, S>,
+}
#[derive(Copy, Clone)]
pub enum VariableKind {
ArgumentVariable(usize /*index*/),
@@ -153,8 +158,7 @@ fn calculate_debuginfo_offset<
L: DebugInfoOffsetLocation<'tcx, Bx>,
>(
bx: &mut Bx,
- local: mir::Local,
- var: &PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
+ projection: &[mir::PlaceElem<'tcx>],
base: L,
) -> DebugInfoOffset<L> {
let mut direct_offset = Size::ZERO;
@@ -162,7 +166,7 @@ fn calculate_debuginfo_offset<
let mut indirect_offsets = vec![];
let mut place = base;
- for elem in &var.projection[..] {
+ for elem in projection {
match *elem {
mir::ProjectionElem::Deref => {
indirect_offsets.push(Size::ZERO);
@@ -183,11 +187,7 @@ fn calculate_debuginfo_offset<
} => {
let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
- span_bug!(
- var.source_info.span,
- "ConstantIndex on non-array type {:?}",
- place.layout()
- )
+ bug!("ConstantIndex on non-array type {:?}", place.layout())
};
*offset += stride * index;
place = place.project_constant_index(bx, index);
@@ -195,11 +195,7 @@ fn calculate_debuginfo_offset<
_ => {
// Sanity check for `can_use_in_debuginfo`.
debug_assert!(!elem.can_use_in_debuginfo());
- span_bug!(
- var.source_info.span,
- "unsupported var debuginfo place `{:?}`",
- mir::Place { local, projection: var.projection },
- )
+ bug!("unsupported var debuginfo projection `{:?}`", projection)
}
}
}
@@ -402,7 +398,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
- calculate_debuginfo_offset(bx, local, &var, base.layout);
+ calculate_debuginfo_offset(bx, &var.projection, base.layout);
// When targeting MSVC, create extra allocas for arguments instead of pointing multiple
// dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
@@ -420,7 +416,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if should_create_individual_allocas {
let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
- calculate_debuginfo_offset(bx, local, &var, base);
+ calculate_debuginfo_offset(bx, &var.projection, base);
// Create a variable which will be a pointer to the actual value
let ptr_ty = Ty::new_ptr(
@@ -484,54 +480,75 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
None
};
- let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
- let (var_ty, var_kind) = match var.value {
+ let var_ty = if let Some(ref fragment) = var.composite {
+ self.monomorphize(fragment.ty)
+ } else {
+ match var.value {
mir::VarDebugInfoContents::Place(place) => {
- let var_ty = self.monomorphized_place_ty(place.as_ref());
- let var_kind = if let Some(arg_index) = var.argument_index
- && place.projection.is_empty()
- {
- let arg_index = arg_index as usize;
- if target_is_msvc {
- // ScalarPair parameters are spilled to the stack so they need to
- // be marked as a `LocalVariable` for MSVC debuggers to visualize
- // their data correctly. (See #81894 & #88625)
- let var_ty_layout = self.cx.layout_of(var_ty);
- if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
- VariableKind::LocalVariable
- } else {
- VariableKind::ArgumentVariable(arg_index)
- }
- } else {
- // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
- // offset in closures to account for the hidden environment?
- VariableKind::ArgumentVariable(arg_index)
- }
- } else {
- VariableKind::LocalVariable
- };
- (var_ty, var_kind)
- }
- mir::VarDebugInfoContents::Const(c) => {
- let ty = self.monomorphize(c.ty());
- (ty, VariableKind::LocalVariable)
+ self.monomorphized_place_ty(place.as_ref())
}
- mir::VarDebugInfoContents::Composite { ty, fragments: _ } => {
- let ty = self.monomorphize(ty);
- (ty, VariableKind::LocalVariable)
+ mir::VarDebugInfoContents::Const(c) => self.monomorphize(c.ty()),
+ }
+ };
+
+ let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
+ let var_kind = if let Some(arg_index) = var.argument_index
+ && var.composite.is_none()
+ && let mir::VarDebugInfoContents::Place(place) = var.value
+ && place.projection.is_empty()
+ {
+ let arg_index = arg_index as usize;
+ if target_is_msvc {
+ // ScalarPair parameters are spilled to the stack so they need to
+ // be marked as a `LocalVariable` for MSVC debuggers to visualize
+ // their data correctly. (See #81894 & #88625)
+ let var_ty_layout = self.cx.layout_of(var_ty);
+ if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+ VariableKind::LocalVariable
+ } else {
+ VariableKind::ArgumentVariable(arg_index)
+ }
+ } else {
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ VariableKind::ArgumentVariable(arg_index)
}
+ } else {
+ VariableKind::LocalVariable
};
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
});
+ let fragment = if let Some(ref fragment) = var.composite {
+ let var_layout = self.cx.layout_of(var_ty);
+
+ let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } =
+ calculate_debuginfo_offset(bx, &fragment.projection, var_layout);
+ debug_assert!(indirect_offsets.is_empty());
+
+ if fragment_layout.size == Size::ZERO {
+ // Fragment is a ZST, so does not represent anything. Avoid generating anything
+ // as this may conflict with a fragment that covers the entire variable.
+ continue;
+ } else if fragment_layout.size == var_layout.size {
+ // Fragment covers entire variable, so as far as
+ // DWARF is concerned, it's not really a fragment.
+ None
+ } else {
+ Some(direct_offset..direct_offset + fragment_layout.size)
+ }
+ } else {
+ None
+ };
+
match var.value {
mir::VarDebugInfoContents::Place(place) => {
per_local[place.local].push(PerLocalVarDebugInfo {
name: var.name,
source_info: var.source_info,
dbg_var,
- fragment: None,
+ fragment,
projection: place.projection,
});
}
@@ -539,59 +556,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let Some(dbg_var) = dbg_var {
let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
- if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
- self.set_debug_loc(bx, var.source_info);
- let base = Self::spill_operand_to_stack(
- operand,
- Some(var.name.to_string()),
- bx,
- );
+ let operand = self.eval_mir_constant_to_operand(bx, &c);
+ self.set_debug_loc(bx, var.source_info);
+ let base =
+ Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx);
- bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], None);
- }
- }
- }
- mir::VarDebugInfoContents::Composite { ty, ref fragments } => {
- let var_ty = self.monomorphize(ty);
- let var_layout = self.cx.layout_of(var_ty);
- for fragment in fragments {
- let mut fragment_start = Size::ZERO;
- let mut fragment_layout = var_layout;
-
- for elem in &fragment.projection {
- match *elem {
- mir::ProjectionElem::Field(field, _) => {
- let i = field.index();
- fragment_start += fragment_layout.fields.offset(i);
- fragment_layout = fragment_layout.field(self.cx, i);
- }
- _ => span_bug!(
- var.source_info.span,
- "unsupported fragment projection `{:?}`",
- elem,
- ),
- }
- }
-
- let place = fragment.contents;
- let fragment = if fragment_layout.size == Size::ZERO {
- // Fragment is a ZST, so does not represent anything.
- continue;
- } else if fragment_layout.size == var_layout.size {
- // Fragment covers entire variable, so as far as
- // DWARF is concerned, it's not really a fragment.
- None
- } else {
- Some(fragment_start..fragment_start + fragment_layout.size)
- };
-
- per_local[place.local].push(PerLocalVarDebugInfo {
- name: var.name,
- source_info: var.source_info,
- dbg_var,
- fragment,
- projection: place.projection,
- });
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment);
}
}
}
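
The fragment handling above boils down to a three-way decision on sizes: skip zero-sized fragments, drop the fragment info when it covers the whole variable, otherwise emit an offset range. A small self-contained sketch, with sizes as plain u64 instead of rustc's `Size` type:

    use std::ops::Range;

    /// Returns `None` to skip a zero-sized fragment, `Some(None)` when the fragment
    /// covers the whole variable (DWARF then needs no fragment info), and
    /// `Some(Some(range))` for a genuine sub-range of the variable.
    fn fragment_range(offset: u64, fragment_size: u64, var_size: u64) -> Option<Option<Range<u64>>> {
        if fragment_size == 0 {
            None
        } else if fragment_size == var_size {
            Some(None)
        } else {
            Some(Some(offset..offset + fragment_size))
        }
    }

    fn main() {
        assert_eq!(fragment_range(0, 0, 16), None);
        assert_eq!(fragment_range(0, 16, 16), Some(None));
        assert_eq!(fragment_range(8, 4, 16), Some(Some(8..12)));
    }
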
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8821fb21f..8efef4405 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -462,7 +462,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(..) = &fn_abi.ret.mode {
+ if let PassMode::Cast { .. } = &fn_abi.ret.mode {
bx.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
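
The pattern change here follows `PassMode::Cast` becoming a struct-like variant. A tiny illustrative enum (hypothetical fields, not the real `PassMode`) showing why `{ pad_i32: true, .. }` is the natural match form:

    enum PassMode {
        Direct,
        Cast { pad_i32: bool, target: &'static str },
    }

    fn needs_extra_arg_slot(mode: &PassMode) -> bool {
        // With a struct variant, the flag being tested can be named instead of matched by position.
        matches!(mode, PassMode::Cast { pad_i32: true, .. })
    }

    fn main() {
        assert!(needs_extra_arg_slot(&PassMode::Cast { pad_i32: true, target: "i64" }));
        assert!(!needs_extra_arg_slot(&PassMode::Direct));
    }
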
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 3464f9108..a61018f98 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -3,8 +3,8 @@ use crate::traits::*;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::mir::traversal;
+use rustc_middle::mir::UnwindTerminateReason;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
use rustc_target::abi::call::{FnAbi, PassMode};
@@ -45,7 +45,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
mir: &'tcx mir::Body<'tcx>,
- debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
+ debug_context: Option<FunctionDebugContext<'tcx, Bx::DIScope, Bx::DILocation>>,
llfn: Bx::Function,
@@ -83,8 +83,8 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
/// Cached unreachable block
unreachable_block: Option<Bx::BasicBlock>,
- /// Cached terminate upon unwinding block
- terminate_block: Option<Bx::BasicBlock>,
+ /// Cached terminate upon unwinding block and its reason
+ terminate_block: Option<(Bx::BasicBlock, UnwindTerminateReason)>,
/// The location where each MIR arg/var/tmp/ret is stored. This is
/// usually an `PlaceRef` representing an alloca, but not always:
@@ -118,7 +118,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
T: Copy + TypeFoldable<TyCtxt<'tcx>>,
{
debug!("monomorphize: self.instance={:?}", self.instance);
- self.instance.subst_mir_and_normalize_erasing_regions(
+ self.instance.instantiate_mir_and_normalize_erasing_regions(
self.cx.tcx(),
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(value),
@@ -144,7 +144,7 @@ impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
- // we need something in the operand.
+ // we need something sufficiently aligned in the operand.
LocalRef::Operand(OperandRef::zero_sized(layout))
} else {
LocalRef::PendingOperand
@@ -174,7 +174,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let mut start_bx = Bx::build(cx, start_llbb);
if mir.basic_blocks.iter().any(|bb| {
- bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate))
+ bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate(_)))
}) {
start_bx.set_personality_fn(cx.eh_personality());
}
@@ -211,23 +211,14 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);
- // Evaluate all required consts; codegen later assumes that CTFE will never fail.
- let mut all_consts_ok = true;
- for const_ in &mir.required_consts {
- if let Err(err) = fx.eval_mir_constant(const_) {
- all_consts_ok = false;
- match err {
- // errored or at least linted
- ErrorHandled::Reported(_) => {}
- ErrorHandled::TooGeneric => {
- span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
- }
- }
- }
- }
- if !all_consts_ok {
- // We leave the IR in some half-built state here, and rely on this code not even being
- // submitted to LLVM once an error was raised.
+ // Rust post-monomorphization checks; we later rely on them.
+ if let Err(err) =
+ mir.post_mono_checks(cx.tcx(), ty::ParamEnv::reveal_all(), |c| Ok(fx.monomorphize(c)))
+ {
+ err.emit_err(cx.tcx());
+ // This IR shouldn't ever be emitted, but let's try to guard against any of this code
+ // ever running.
+ start_bx.abort();
return;
}
@@ -326,7 +317,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if let PassMode::Cast(_, true) = arg.mode {
+ if let PassMode::Cast { pad_i32: true, .. } = arg.mode {
llarg_idx += 1;
}
let pr_field = place.project_field(bx, i);
@@ -350,7 +341,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if let PassMode::Cast(_, true) = arg.mode {
+ if let PassMode::Cast { pad_i32: true, .. } = arg.mode {
llarg_idx += 1;
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index f90d1a0fc..0ab2b7ecd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -6,8 +6,8 @@ use crate::glue;
use crate::traits::*;
use crate::MemFlags;
-use rustc_middle::mir;
-use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::interpret::{alloc_range, Pointer, Scalar};
+use rustc_middle::mir::{self, ConstValue};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, Size};
@@ -50,7 +50,8 @@ pub enum OperandValue<V> {
/// from [`ConstMethods::const_poison`].
///
/// An `OperandValue` *must* be this variant for any type for which
- /// `is_zst` on its `Layout` returns `true`.
+ /// `is_zst` on its `Layout` returns `true`. Note however that
+ /// these values can still require alignment.
ZeroSized,
}
@@ -85,7 +86,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
- val: ConstValue<'tcx>,
+ val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> Self {
let layout = bx.layout_of(ty);
@@ -99,12 +100,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
OperandValue::Immediate(llval)
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
- ConstValue::Slice { data, start, end } => {
+ ConstValue::Slice { data, meta } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
- Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+ Pointer::new(bx.tcx().reserve_and_set_memory_alloc(data), Size::ZERO),
&bx.tcx(),
);
let a_llval = bx.scalar_to_backend(
@@ -112,10 +113,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
a_scalar,
bx.scalar_pair_element_backend_type(layout, 0, true),
);
- let b_llval = bx.const_usize((end - start) as u64);
+ let b_llval = bx.const_usize(meta);
OperandValue::Pair(a_llval, b_llval)
}
- ConstValue::ByRef { alloc, offset } => {
+ ConstValue::Indirect { alloc_id, offset } => {
+ let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
return Self::from_const_alloc(bx, layout, alloc, offset);
}
};
@@ -133,15 +135,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
assert_eq!(alloc_align, layout.align.abi);
let read_scalar = |start, size, s: abi::Scalar, ty| {
- let val = alloc
- .0
- .read_scalar(
- bx,
- alloc_range(start, size),
- /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
- )
- .unwrap();
- bx.scalar_to_backend(val, s, ty)
+ match alloc.0.read_scalar(
+ bx,
+ alloc_range(start, size),
+ /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
+ ) {
+ Ok(val) => bx.scalar_to_backend(val, s, ty),
+ Err(_) => bx.const_poison(ty),
+ }
};
// It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@@ -154,7 +155,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
- let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
+ let val = read_scalar(offset, size, s, bx.backend_type(layout));
OperandRef { val: OperandValue::Immediate(val), layout }
}
Abi::ScalarPair(
@@ -162,10 +163,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
b @ abi::Scalar::Initialized { .. },
) => {
let (a_size, b_size) = (a.size(bx), b.size(bx));
- let b_offset = a_size.align_to(b.align(bx).abi);
+ let b_offset = (offset + a_size).align_to(b.align(bx).abi);
assert!(b_offset.bytes() > 0);
let a_val = read_scalar(
- Size::ZERO,
+ offset,
a_size,
a,
bx.scalar_pair_element_backend_type(layout, 0, true),
@@ -181,6 +182,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
_ if layout.is_zst() => OperandRef::zero_sized(layout),
_ => {
// Neither a scalar nor scalar pair. Load from a place
+ // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
+ // same `ConstAllocation`?
let init = bx.const_data_from_alloc(alloc);
let base_addr = bx.static_addr_of(init, alloc_align, None);
@@ -568,12 +571,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_consume(bx, place.as_ref())
}
- mir::Operand::Constant(ref constant) => {
- // This cannot fail because we checked all required_consts in advance.
- self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
- span_bug!(constant.span, "erroneous constant not captured by required_consts")
- })
- }
+ mir::Operand::Constant(ref constant) => self.eval_mir_constant_to_operand(bx, constant),
}
}
}
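
`ConstValue::Slice` now stores a pointer to the start of its own allocation plus a length (`meta`), and codegen lowers that to a (pointer, length) scalar pair. A simplified sketch with stand-in types (not rustc's):

    struct ConstAllocation {
        bytes: Vec<u8>,
    }

    enum ConstValue {
        Slice { data: ConstAllocation, meta: u64 },
    }

    /// Lower a slice constant to the (pointer, length) pair codegen needs.
    fn to_scalar_pair(val: &ConstValue) -> (*const u8, u64) {
        match val {
            // The pointer always targets offset zero; the length travels as `meta`.
            ConstValue::Slice { data, meta } => (data.bytes.as_ptr(), *meta),
        }
    }

    fn main() {
        let val = ConstValue::Slice { data: ConstAllocation { bytes: b"hello".to_vec() }, meta: 5 };
        let (ptr, len) = to_scalar_pair(&val);
        let s = unsafe { std::slice::from_raw_parts(ptr, len as usize) };
        assert_eq!(s, b"hello");
    }
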
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index e7c3906d9..eb590a45a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -114,7 +114,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx.struct_gep(ty, self.llval, 1)
}
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
- // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+ // ZST fields (even some that require alignment) are not included in Scalar,
+ // ScalarPair, and Vector layouts, so manually offset the pointer.
bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
}
Abi::Scalar(_) | Abi::ScalarPair(..) => {
@@ -462,7 +463,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::ProjectionElem::Field(ref field, _) => {
cg_base.project_field(bx, field.index())
}
- mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
+ mir::ProjectionElem::OpaqueCast(ty) => {
+ bug!("encountered OpaqueCast({ty}) in codegen")
+ }
+ mir::ProjectionElem::Subtype(ty) => cg_base.project_type(bx, self.monomorphize(ty)),
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(mir::Place::from(index));
let index = self.codegen_operand(bx, index);
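
The new `bug!` arm encodes a pipeline invariant: `OpaqueCast` projections must be gone before codegen. A small sketch of handling such a "cannot happen at this stage" variant, using a hypothetical projection enum:

    enum ProjectionElem {
        Field(usize),
        OpaqueCast,
        Subtype,
    }

    fn project(elem: &ProjectionElem) -> &'static str {
        match elem {
            ProjectionElem::Field(_) => "project to field",
            // Earlier passes are expected to have removed these; reaching one here is a bug.
            ProjectionElem::OpaqueCast => unreachable!("encountered OpaqueCast in codegen"),
            ProjectionElem::Subtype => "project to the (monomorphized) target type",
        }
    }

    fn main() {
        assert_eq!(project(&ProjectionElem::Field(0)), "project to field");
        assert_eq!(project(&ProjectionElem::Subtype), "project to the (monomorphized) target type");
    }
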
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 07c61df21..fc8d33891 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -1004,6 +1004,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(ty);
+ // For ZST this can be `OperandValueKind::ZeroSized`.
self.cx.spanned_layout_of(ty, span).is_zst()
}
}
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index 0a02ca6b3..ac8123bc1 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -11,9 +11,9 @@ use rustc_data_structures::sync::{DynSend, DynSync};
use rustc_errors::ErrorGuaranteed;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::{ExternProviders, Providers};
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_middle::util::Providers;
use rustc_session::{
config::{self, OutputFilenames, PrintRequest},
cstore::MetadataLoaderDyn,
@@ -85,7 +85,6 @@ pub trait CodegenBackend {
}
fn provide(&self, _providers: &mut Providers) {}
- fn provide_extern(&self, _providers: &mut ExternProviders) {}
fn codegen_crate<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
index 63fecaf34..4acc0ea07 100644
--- a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -26,7 +26,7 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes {
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: Self::Function,
mir: &mir::Body<'tcx>,
- ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+ ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>>;
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index e5dd5729d..d23e2a9f3 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -83,9 +83,6 @@ const_eval_dyn_call_vtable_mismatch =
const_eval_dyn_star_call_vtable_mismatch =
`dyn*` call on a pointer whose vtable does not match its type
-const_eval_erroneous_constant =
- erroneous constant used
-
const_eval_error = {$error_kind ->
[static] could not evaluate static initializer
[const] evaluation of constant value failed
@@ -384,7 +381,7 @@ const_eval_unreachable_unwind =
const_eval_unsigned_offset_from_overflow =
`ptr_offset_from_unsigned` called when first pointer has smaller offset than second: {$a_offset} < {$b_offset}
-
+const_eval_unsized_local = unsized locals are not supported
const_eval_unstable_const_fn = `{$def_path}` is not yet stable as a const fn
const_eval_unstable_in_stable =
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index d39a7e8a1..bf1e0a370 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -4,8 +4,7 @@ use rustc_errors::{DiagnosticArgValue, DiagnosticMessage, IntoDiagnostic, IntoDi
use rustc_middle::mir::AssertKind;
use rustc_middle::ty::TyCtxt;
use rustc_middle::ty::{layout::LayoutError, ConstInt};
-use rustc_span::source_map::Spanned;
-use rustc_span::{ErrorGuaranteed, Span, Symbol};
+use rustc_span::{ErrorGuaranteed, Span, Symbol, DUMMY_SP};
use super::InterpCx;
use crate::errors::{self, FrameNote, ReportErrorExt};
@@ -18,7 +17,6 @@ pub enum ConstEvalErrKind {
ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>),
Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
- Abort(String),
}
impl MachineStopType for ConstEvalErrKind {
@@ -30,7 +28,6 @@ impl MachineStopType for ConstEvalErrKind {
ModifiedGlobal => const_eval_modified_global,
Panic { .. } => const_eval_panic,
AssertFailure(x) => x.diagnostic_message(),
- Abort(msg) => msg.to_string().into(),
}
}
fn add_args(
@@ -39,7 +36,7 @@ impl MachineStopType for ConstEvalErrKind {
) {
use ConstEvalErrKind::*;
match *self {
- ConstAccessesStatic | ModifiedGlobal | Abort(_) => {}
+ ConstAccessesStatic | ModifiedGlobal => {}
AssertFailure(kind) => kind.add_args(adder),
Panic { msg, line, col, file } => {
adder("msg".into(), msg.into_diagnostic_arg());
@@ -134,35 +131,17 @@ where
{
// Special handling for certain errors
match error {
- // Don't emit a new diagnostic for these errors
+ // Don't emit a new diagnostic for these errors, they are already reported elsewhere or
+ // should remain silent.
err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
- ErrorHandled::TooGeneric
+ ErrorHandled::TooGeneric(span.unwrap_or(DUMMY_SP))
}
- err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar),
+ err_inval!(AlreadyReported(guar)) => ErrorHandled::Reported(guar, span.unwrap_or(DUMMY_SP)),
err_inval!(Layout(LayoutError::ReferencesError(guar))) => {
- ErrorHandled::Reported(guar.into())
- }
- err_inval!(Layout(layout_error @ LayoutError::SizeOverflow(_))) => {
- // We must *always* hard error on these, even if the caller wants just a lint.
- // The `message` makes little sense here, this is a more serious error than the
- // caller thinks anyway.
- // See <https://github.com/rust-lang/rust/pull/63152>.
- let (our_span, frames) = get_span_and_frames();
- let span = span.unwrap_or(our_span);
- let mut err =
- tcx.sess.create_err(Spanned { span, node: layout_error.into_diagnostic() });
- err.code(rustc_errors::error_code!(E0080));
- let Some((mut err, handler)) = err.into_diagnostic() else {
- panic!("did not emit diag");
- };
- for frame in frames {
- err.eager_subdiagnostic(handler, frame);
- }
-
- ErrorHandled::Reported(handler.emit_diagnostic(&mut err).unwrap().into())
+ ErrorHandled::Reported(guar.into(), span.unwrap_or(DUMMY_SP))
}
+ // Report remaining errors.
_ => {
- // Report as hard error.
let (our_span, frames) = get_span_and_frames();
let span = span.unwrap_or(our_span);
let err = mk(span, frames);
@@ -173,7 +152,7 @@ where
// Use *our* span to label the interp error
err.span_label(our_span, msg);
- ErrorHandled::Reported(err.emit().into())
+ ErrorHandled::Reported(err.emit().into(), span)
}
}
}
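
`ErrorHandled` now carries the span at which evaluation failed, defaulting to `DUMMY_SP` when the caller provides none. A minimal sketch with a stand-in `Span` type:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Span(u32);

    const DUMMY_SP: Span = Span(0);

    #[derive(Debug, PartialEq)]
    enum ErrorHandled {
        Reported(Span),
        TooGeneric(Span),
    }

    fn classify(too_generic: bool, span: Option<Span>) -> ErrorHandled {
        // Fall back to a dummy span when the caller has nothing better to offer.
        let span = span.unwrap_or(DUMMY_SP);
        if too_generic { ErrorHandled::TooGeneric(span) } else { ErrorHandled::Reported(span) }
    }

    fn main() {
        assert_eq!(classify(true, None), ErrorHandled::TooGeneric(DUMMY_SP));
        assert_eq!(classify(false, Some(Span(7))), ErrorHandled::Reported(Span(7)));
    }
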
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 4c7e91944..3d758cd01 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -4,9 +4,9 @@ use crate::errors::ConstEvalError;
use either::{Left, Right};
use rustc_hir::def::DefKind;
-use rustc_middle::mir;
use rustc_middle::mir::interpret::{ErrorHandled, InterpErrorInfo};
use rustc_middle::mir::pretty::write_allocation_bytes;
+use rustc_middle::mir::{self, ConstAlloc, ConstValue};
use rustc_middle::traits::Reveal;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
@@ -18,9 +18,8 @@ use super::{CanAccessStatics, CompileTimeEvalContext, CompileTimeInterpreter};
use crate::errors;
use crate::interpret::eval_nullary_intrinsic;
use crate::interpret::{
- intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
- Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
- RefTracking, StackPopCleanup,
+ intern_const_alloc_recursive, CtfeValidationMode, GlobalId, Immediate, InternKind, InterpCx,
+ InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup,
};
// Returns a pointer to where the result lives
@@ -61,6 +60,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
&ret.clone().into(),
StackPopCleanup::Root { cleanup: false },
)?;
+ ecx.storage_live_for_always_live_locals()?;
// The main interpreter loop.
while ecx.step()? {}
@@ -78,7 +78,7 @@ fn eval_body_using_ecx<'mir, 'tcx>(
intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
// we leave alignment checks off, since this `ecx` will not be used for further evaluation anyway
- debug!("eval_body_using_ecx done: {:?}", *ret);
+ debug!("eval_body_using_ecx done: {:?}", ret);
Ok(ret)
}
@@ -104,91 +104,75 @@ pub(super) fn mk_eval_cx<'mir, 'tcx>(
)
}
-/// This function converts an interpreter value into a constant that is meant for use in the
-/// type system.
+/// This function converts an interpreter value into a MIR constant.
#[instrument(skip(ecx), level = "debug")]
pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, 'tcx>,
op: &OpTy<'tcx>,
) -> ConstValue<'tcx> {
- // We do not have value optimizations for everything.
- // Only scalars and slices, since they are very common.
- // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
- // from scalar unions that are initialized with one of their zero sized variants. We could
- // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
- // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
- // `Undef` situation.
- let try_as_immediate = match op.layout.abi {
+ // Handle ZST consistently and early.
+ if op.layout.is_zst() {
+ return ConstValue::ZeroSized;
+ }
+
+ // All scalar types should be stored as `ConstValue::Scalar`. This is needed to make
+ // `ConstValue::try_to_scalar` efficient; we want that to work for *all* constants of scalar
+ // type (it's used throughout the compiler and having it work just on literals is not enough)
+ // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
+ // from its byte-serialized form).
+ let force_as_immediate = match op.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
- Abi::ScalarPair(..) => match op.layout.ty.kind() {
- ty::Ref(_, inner, _) => match *inner.kind() {
- ty::Slice(elem) => elem == ecx.tcx.types.u8,
- ty::Str => true,
- _ => false,
- },
- _ => false,
- },
+ // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
+ // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
+ // not have to generate any duplicate allocations (we preserve the original `AllocId` in
+ // `ConstValue::Indirect`). It means accessing the contents of a slice can be slow (since
+ // they can be stored as `ConstValue::Indirect`), but that's not relevant since we barely
+ // ever have to do this. (`try_get_slice_bytes_for_diagnostics` exists to provide this
+ // functionality.)
_ => false,
};
- let immediate = if try_as_immediate {
+ let immediate = if force_as_immediate {
Right(ecx.read_immediate(op).expect("normalization works on validated constants"))
} else {
- // It is guaranteed that any non-slice scalar pair is actually ByRef here.
- // When we come back from raw const eval, we are always by-ref. The only way our op here is
- // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
- // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
- // structs containing such.
op.as_mplace_or_imm()
};
debug!(?immediate);
- // We know `offset` is relative to the allocation, so we can use `into_parts`.
- let to_const_value = |mplace: &MPlaceTy<'_>| {
- debug!("to_const_value(mplace: {:?})", mplace);
- match mplace.ptr.into_parts() {
- (Some(alloc_id), offset) => {
- let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
- ConstValue::ByRef { alloc, offset }
- }
- (None, offset) => {
- assert!(mplace.layout.is_zst());
- assert_eq!(
- offset.bytes() % mplace.layout.align.abi.bytes(),
- 0,
- "this MPlaceTy must come from a validated constant, thus we can assume the \
- alignment is correct",
- );
- ConstValue::ZeroSized
- }
- }
- };
match immediate {
- Left(ref mplace) => to_const_value(mplace),
- // see comment on `let try_as_immediate` above
+ Left(ref mplace) => {
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (alloc_id, offset) = mplace.ptr().into_parts();
+ let alloc_id = alloc_id.expect("cannot have `fake` place for non-ZST type");
+ ConstValue::Indirect { alloc_id, offset }
+ }
+ // see comment on `let force_as_immediate` above
Right(imm) => match *imm {
- _ if imm.layout.is_zst() => ConstValue::ZeroSized,
Immediate::Scalar(x) => ConstValue::Scalar(x),
Immediate::ScalarPair(a, b) => {
debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
- // We know `offset` is relative to the allocation, so we can use `into_parts`.
- let (data, start) = match a.to_pointer(ecx).unwrap().into_parts() {
- (Some(alloc_id), offset) => {
- (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
- }
- (None, _offset) => (
- ecx.tcx.mk_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
- b"" as &[u8],
- )),
- 0,
+ // This codepath solely exists for `valtree_to_const_value` to not need to generate
+ // a `ConstValue::Indirect` for wide references, so it is tightly restricted to just
+ // that case.
+ let pointee_ty = imm.layout.ty.builtin_deref(false).unwrap().ty; // `false` = no raw ptrs
+ debug_assert!(
+ matches!(
+ ecx.tcx.struct_tail_without_normalization(pointee_ty).kind(),
+ ty::Str | ty::Slice(..),
),
- };
- let len = b.to_target_usize(ecx).unwrap();
- let start = start.try_into().unwrap();
- let len: usize = len.try_into().unwrap();
- ConstValue::Slice { data, start, end: start + len }
+ "`ConstValue::Slice` is for slice-tailed types only, but got {}",
+ imm.layout.ty,
+ );
+ let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to the beginning of an actual allocation";
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
+ let alloc_id = alloc_id.expect(msg);
+ let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+ assert!(offset == abi::Size::ZERO, "{}", msg);
+ let meta = b.to_target_usize(ecx).expect(msg);
+ ConstValue::Slice { data, meta }
}
- Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+ Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
},
}
}
@@ -234,7 +218,7 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
key.param_env = key.param_env.with_user_facing();
match tcx.eval_to_const_value_raw(key) {
// try again with reveal all as requested
- Err(ErrorHandled::TooGeneric) => {}
+ Err(ErrorHandled::TooGeneric(_)) => {}
// deduplicate calls
other => return other,
}
@@ -281,7 +265,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
key.param_env = key.param_env.with_user_facing();
match tcx.eval_to_allocation_raw(key) {
// try again with reveal all as requested
- Err(ErrorHandled::TooGeneric) => {}
+ Err(ErrorHandled::TooGeneric(_)) => {}
// deduplicate calls
other => return other,
}
@@ -369,9 +353,9 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
inner = true;
}
};
- let alloc_id = mplace.ptr.provenance.unwrap();
+ let alloc_id = mplace.ptr().provenance.unwrap();
- // Validation failed, report an error. This is always a hard error.
+ // Validation failed, report an error.
if let Err(error) = validation {
let (error, backtrace) = error.into_parts();
backtrace.print_backtrace();
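
The rewritten `op_to_const` follows a simple policy: ZSTs become `ZeroSized`, scalar-ABI values are forced into `Scalar` so `try_to_scalar` stays cheap, and everything else stays behind its allocation (the special slice path for wide references is omitted in this sketch). A simplified illustration with stand-in types:

    #[derive(Debug, PartialEq)]
    enum Abi {
        Scalar,
        ScalarPair,
        Aggregate,
    }

    #[derive(Debug, PartialEq)]
    enum ConstValue {
        ZeroSized,
        Scalar(u128),
        Indirect { alloc_id: u32, offset: u64 },
    }

    fn op_to_const(is_zst: bool, abi: Abi, scalar_bits: u128, alloc_id: u32, offset: u64) -> ConstValue {
        if is_zst {
            return ConstValue::ZeroSized;
        }
        match abi {
            Abi::Scalar => ConstValue::Scalar(scalar_bits),
            // Scalar pairs and aggregates keep pointing at their original allocation.
            Abi::ScalarPair | Abi::Aggregate => ConstValue::Indirect { alloc_id, offset },
        }
    }

    fn main() {
        assert_eq!(op_to_const(true, Abi::Aggregate, 0, 0, 0), ConstValue::ZeroSized);
        assert_eq!(op_to_const(false, Abi::Scalar, 42, 0, 0), ConstValue::Scalar(42));
        assert_eq!(op_to_const(false, Abi::ScalarPair, 0, 1, 0), ConstValue::Indirect { alloc_id: 1, offset: 0 });
    }
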
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index b740b79d1..14b9894aa 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -3,7 +3,7 @@ use rustc_hir::{LangItem, CRATE_HIR_ID};
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
use std::borrow::Borrow;
use std::hash::Hash;
@@ -464,6 +464,13 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
}
+ fn panic_nounwind(ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
+ let msg = Symbol::intern(msg);
+ let span = ecx.find_closest_untracked_caller_location();
+ let (file, line, col) = ecx.location_triple_for_span(span);
+ Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
+ }
+
fn call_intrinsic(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
@@ -584,16 +591,12 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
Err(ConstEvalErrKind::AssertFailure(err).into())
}
- fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
- Err(ConstEvalErrKind::Abort(msg).into())
- }
-
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: mir::BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
- ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
}
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
index 854104622..bcbe996be 100644
--- a/compiler/rustc_const_eval/src/const_eval/mod.rs
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -1,9 +1,10 @@
// Not in interpret to make sure we do not use private implementation details
use crate::errors::MaxNumNodesInConstErr;
-use crate::interpret::{intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, Scalar};
+use crate::interpret::{intern_const_alloc_recursive, InternKind, InterpCx, Scalar};
use rustc_middle::mir;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
@@ -22,7 +23,7 @@ pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
pub(crate) fn const_caller_location(
tcx: TyCtxt<'_>,
(file, line, col): (Symbol, u32, u32),
-) -> ConstValue<'_> {
+) -> mir::ConstValue<'_> {
trace!("const_caller_location: {}:{}:{}", file, line, col);
let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), CanAccessStatics::No);
@@ -30,7 +31,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
- ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
+ mir::ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
}
// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
@@ -85,18 +86,18 @@ pub(crate) fn eval_to_valtree<'tcx>(
}
#[instrument(skip(tcx), level = "debug")]
-pub fn try_destructure_mir_constant_for_diagnostics<'tcx>(
- tcx: TyCtxt<'tcx>,
- val: ConstValue<'tcx>,
+pub(crate) fn try_destructure_mir_constant_for_diagnostics<'tcx>(
+ tcx: TyCtxtAt<'tcx>,
+ val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
) -> Option<mir::DestructuredConstant<'tcx>> {
let param_env = ty::ParamEnv::reveal_all();
- let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+ let ecx = mk_eval_cx(tcx.tcx, tcx.span, param_env, CanAccessStatics::No);
let op = ecx.const_val_to_op(val, ty, None).ok()?;
// We go to `usize` as we cannot allocate anything bigger anyway.
let (field_count, variant, down) = match ty.kind() {
- ty::Array(_, len) => (len.eval_target_usize(tcx, param_env) as usize, None, op),
+ ty::Array(_, len) => (len.eval_target_usize(tcx.tcx, param_env) as usize, None, op),
ty::Adt(def, _) if def.variants().is_empty() => {
return None;
}
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index b15a65d67..7436ea6ae 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -4,10 +4,11 @@ use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
use crate::const_eval::CanAccessStatics;
use crate::interpret::MPlaceTy;
use crate::interpret::{
- intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
- MemoryKind, Place, Projectable, Scalar,
+ intern_const_alloc_recursive, ImmTy, Immediate, InternKind, MemPlaceMeta, MemoryKind, PlaceTy,
+ Projectable, Scalar,
};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::mir;
+use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
use rustc_target::abi::VariantIdx;
@@ -151,7 +152,7 @@ pub(crate) fn const_to_valtree_inner<'tcx>(
// FIXME(oli-obk): we can probably encode closures just like structs
| ty::Closure(..)
| ty::Generator(..)
- | ty::GeneratorWitness(..) |ty::GeneratorWitnessMIR(..)=> Err(ValTreeCreationError::NonSupportedType),
+ | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
}
}
@@ -189,12 +190,11 @@ fn reconstruct_place_meta<'tcx>(
}
#[instrument(skip(ecx), level = "debug", ret)]
-fn create_pointee_place<'tcx>(
+fn create_valtree_place<'tcx>(
ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
- ty: Ty<'tcx>,
+ layout: TyAndLayout<'tcx>,
valtree: ty::ValTree<'tcx>,
) -> MPlaceTy<'tcx> {
- let layout = ecx.layout_of(ty).unwrap();
let meta = reconstruct_place_meta(layout, valtree, ecx.tcx.tcx);
ecx.allocate_dyn(layout, MemoryKind::Stack, meta).unwrap()
}
@@ -207,7 +207,7 @@ pub fn valtree_to_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
valtree: ty::ValTree<'tcx>,
-) -> ConstValue<'tcx> {
+) -> mir::ConstValue<'tcx> {
// Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
// (those for constants with type bool, int, uint, float or char).
// For all other types we create an `MPlace` and fill that by walking
@@ -216,50 +216,56 @@ pub fn valtree_to_const_value<'tcx>(
// FIXME Does this need an example?
let (param_env, ty) = param_env_ty.into_parts();
- let mut ecx: crate::interpret::InterpCx<
- '_,
- '_,
- crate::const_eval::CompileTimeInterpreter<'_, '_>,
- > = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
match ty.kind() {
ty::FnDef(..) => {
assert!(valtree.unwrap_branch().is_empty());
- ConstValue::ZeroSized
+ mir::ConstValue::ZeroSized
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
- ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+ ty::ValTree::Leaf(scalar_int) => mir::ConstValue::Scalar(Scalar::Int(scalar_int)),
ty::ValTree::Branch(_) => bug!(
"ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
),
},
- ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
- let place = match ty.kind() {
- ty::Ref(_, inner_ty, _) => {
- // Need to create a place for the pointee (the reference itself will be an immediate)
- create_pointee_place(&mut ecx, *inner_ty, valtree)
- }
- _ => {
- // Need to create a place for this valtree.
- create_pointee_place(&mut ecx, ty, valtree)
+ ty::Ref(_, inner_ty, _) => {
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+ let imm = valtree_to_ref(&mut ecx, valtree, *inner_ty);
+ let imm = ImmTy::from_immediate(imm, tcx.layout_of(param_env_ty).unwrap());
+ op_to_const(&ecx, &imm.into())
+ }
+ ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+ let layout = tcx.layout_of(param_env_ty).unwrap();
+ if layout.is_zst() {
+ // Fast path to avoid some allocations.
+ return mir::ConstValue::ZeroSized;
+ }
+ if layout.abi.is_scalar()
+ && (matches!(ty.kind(), ty::Tuple(_))
+ || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
+ {
+ // A Scalar tuple/struct; we can avoid creating an allocation.
+ let branches = valtree.unwrap_branch();
+ // Find the non-ZST field. (There can be aligned ZST!)
+ for (i, &inner_valtree) in branches.iter().enumerate() {
+ let field = layout.field(&LayoutCx { tcx, param_env }, i);
+ if !field.is_zst() {
+ return valtree_to_const_value(tcx, param_env.and(field.ty), inner_valtree);
+ }
}
- };
- debug!(?place);
+ bug!("could not find non-ZST field during in {layout:#?}");
+ }
+
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, CanAccessStatics::No);
+
+ // Need to create a place for this valtree.
+ let place = create_valtree_place(&mut ecx, layout, valtree);
valtree_into_mplace(&mut ecx, &place, valtree);
dump_place(&ecx, &place);
intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
- match ty.kind() {
- ty::Ref(_, _, _) => {
- let ref_place = place.to_ref(&tcx);
- let imm =
- ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap());
-
- op_to_const(&ecx, &imm.into())
- }
- _ => op_to_const(&ecx, &place.into()),
- }
+ op_to_const(&ecx, &place.into())
}
ty::Never
| ty::Error(_)
@@ -274,7 +280,6 @@ pub fn valtree_to_const_value<'tcx>(
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::FnPtr(_)
| ty::RawPtr(_)
| ty::Str
@@ -283,6 +288,22 @@ pub fn valtree_to_const_value<'tcx>(
}
}
+/// Put a valtree into memory and return a reference to that.
+fn valtree_to_ref<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ valtree: ty::ValTree<'tcx>,
+ pointee_ty: Ty<'tcx>,
+) -> Immediate {
+ let pointee_place = create_valtree_place(ecx, ecx.layout_of(pointee_ty).unwrap(), valtree);
+ debug!(?pointee_place);
+
+ valtree_into_mplace(ecx, &pointee_place, valtree);
+ dump_place(ecx, &pointee_place);
+ intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+ pointee_place.to_ref(&ecx.tcx)
+}
+
#[instrument(skip(ecx), level = "debug")]
fn valtree_into_mplace<'tcx>(
ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
@@ -292,7 +313,6 @@ fn valtree_into_mplace<'tcx>(
// This will match on valtree and write the value(s) corresponding to the ValTree
// inside the place recursively.
- let tcx = ecx.tcx.tcx;
let ty = place.layout.ty;
match ty.kind() {
@@ -305,27 +325,8 @@ fn valtree_into_mplace<'tcx>(
ecx.write_immediate(Immediate::Scalar(scalar_int.into()), place).unwrap();
}
ty::Ref(_, inner_ty, _) => {
- let pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
- debug!(?pointee_place);
-
- valtree_into_mplace(ecx, &pointee_place, valtree);
- dump_place(ecx, &pointee_place);
- intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
-
- let imm = match inner_ty.kind() {
- ty::Slice(_) | ty::Str => {
- let len = valtree.unwrap_branch().len();
- let len_scalar = Scalar::from_target_usize(len as u64, &tcx);
-
- Immediate::ScalarPair(
- Scalar::from_maybe_pointer((*pointee_place).ptr, &tcx),
- len_scalar,
- )
- }
- _ => pointee_place.to_ref(&tcx),
- };
+ let imm = valtree_to_ref(ecx, valtree, *inner_ty);
debug!(?imm);
-
ecx.write_immediate(imm, place).unwrap();
}
ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
@@ -383,5 +384,5 @@ fn valtree_into_mplace<'tcx>(
}
fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: &MPlaceTy<'tcx>) {
- trace!("{:?}", ecx.dump_place(Place::Ptr(**place)));
+ trace!("{:?}", ecx.dump_place(&PlaceTy::from(place.clone())));
}
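
The scalar struct/tuple fast path above relies on a scalar-ABI aggregate having exactly one field that carries data. A small sketch of that lookup, modelling the layout as a plain list of field sizes:

    /// A scalar aggregate has exactly one non-zero-sized field; the constant for the
    /// aggregate is the constant for that field.
    fn scalar_field_index(field_sizes: &[u64]) -> usize {
        field_sizes
            .iter()
            .position(|&size| size != 0)
            .expect("could not find non-ZST field in a scalar aggregate")
    }

    fn main() {
        // e.g. struct Wrapper(PhantomData<u8>, u32): the aligned ZST is skipped.
        assert_eq!(scalar_field_index(&[0, 4]), 1);
    }
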
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 4362cae7e..b1599dd68 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -239,13 +239,6 @@ pub struct LongRunningWarn {
pub item_span: Span,
}
-#[derive(Diagnostic)]
-#[diag(const_eval_erroneous_constant)]
-pub(crate) struct ErroneousConstUsed {
- #[primary_span]
- pub span: Span,
-}
-
#[derive(Subdiagnostic)]
#[note(const_eval_non_const_impl)]
pub(crate) struct NonConstImplNote {
@@ -482,6 +475,9 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
use UndefinedBehaviorInfo::*;
match self {
Ub(msg) => msg.clone().into(),
+ Custom(x) => (x.msg)(),
+ ValidationError(e) => e.diagnostic_message(),
+
Unreachable => const_eval_unreachable,
BoundsCheckFailed { .. } => const_eval_bounds_check_failed,
DivisionByZero => const_eval_division_by_zero,
@@ -513,8 +509,8 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
ScalarSizeMismatch(_) => const_eval_scalar_size_mismatch,
UninhabitedEnumVariantWritten(_) => const_eval_uninhabited_enum_variant_written,
UninhabitedEnumVariantRead(_) => const_eval_uninhabited_enum_variant_read,
- ValidationError(e) => e.diagnostic_message(),
- Custom(x) => (x.msg)(),
+ AbiMismatchArgument { .. } => const_eval_incompatible_types,
+ AbiMismatchReturn { .. } => const_eval_incompatible_return_types,
}
}
@@ -525,8 +521,15 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
) {
use UndefinedBehaviorInfo::*;
match self {
- Ub(_)
- | Unreachable
+ Ub(_) => {}
+ Custom(custom) => {
+ (custom.add_args)(&mut |name, value| {
+ builder.set_arg(name, value);
+ });
+ }
+ ValidationError(e) => e.add_args(handler, builder),
+
+ Unreachable
| DivisionByZero
| RemainderByZero
| DivisionOverflow
@@ -593,11 +596,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
builder.set_arg("target_size", info.target_size);
builder.set_arg("data_size", info.data_size);
}
- ValidationError(e) => e.add_args(handler, builder),
- Custom(custom) => {
- (custom.add_args)(&mut |name, value| {
- builder.set_arg(name, value);
- });
+ AbiMismatchArgument { caller_ty, callee_ty }
+ | AbiMismatchReturn { caller_ty, callee_ty } => {
+ builder.set_arg("caller_ty", caller_ty.to_string());
+ builder.set_arg("callee_ty", callee_ty.to_string());
}
}
}
@@ -795,6 +797,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
use crate::fluent_generated::*;
match self {
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
+ UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
@@ -814,7 +817,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
// `ReadPointerAsInt(Some(info))` is never printed anyway, it only serves as an error to
// be further processed by validity checking which then turns it into something nice to
// print. So it's not worth the effort of having diagnostics that can print the `info`.
- Unsupported(_) | ReadPointerAsInt(_) => {}
+ UnsizedLocal | Unsupported(_) | ReadPointerAsInt(_) => {}
OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
builder.set_arg("ptr", ptr);
}
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 98e853dc4..b9f88cf63 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -24,41 +24,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
+ // `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
+ // possible.
+ let cast_layout =
+ if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
// FIXME: In which cases should we trigger UB when the source is uninit?
match cast_kind {
CastKind::PointerCoercion(PointerCoercion::Unsize) => {
- let cast_ty = self.layout_of(cast_ty)?;
- self.unsize_into(src, cast_ty, dest)?;
+ self.unsize_into(src, cast_layout, dest)?;
}
CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?;
- let res = self.pointer_expose_address_cast(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.pointer_expose_address_cast(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?;
- let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.pointer_from_exposed_address_cast(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?;
- let res = self.int_to_int_or_float(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.int_to_int_or_float(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?;
- let res = self.float_to_float_or_int(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.float_to_float_or_int(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(src)?;
- let res = self.ptr_to_ptr(&src, cast_ty)?;
- self.write_immediate(res, dest)?;
+ let res = self.ptr_to_ptr(&src, cast_layout)?;
+ self.write_immediate(*res, dest)?;
}
CastKind::PointerCoercion(
@@ -84,10 +87,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)
.ok_or_else(|| err_inval!(TooGeneric))?;
- let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
- _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+ _ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
}
}
@@ -98,7 +101,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// No change to value
self.write_immediate(*src, dest)?;
}
- _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+ _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
}
}
@@ -116,10 +119,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::ClosureKind::FnOnce,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
- let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
- _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+ _ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
}
}
@@ -140,6 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CastKind::Transmute => {
assert!(src.layout.is_sized());
assert!(dest.layout.is_sized());
+ assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
if src.layout.size != dest.layout.size {
let src_bytes = src.layout.size.bytes();
let dest_bytes = dest.layout.size.bytes();
@@ -164,62 +168,61 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn int_to_int_or_float(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
- assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
+ assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
- Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
+ Ok(ImmTy::from_scalar(
+ self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
+ cast_to,
+ ))
}
/// Handles 'FloatToFloat' and 'FloatToInt' casts.
pub fn float_to_float_or_int(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
- match src.layout.ty.kind() {
+ let val = match src.layout.ty.kind() {
// Floating point
- Float(FloatTy::F32) => {
- return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
- }
- Float(FloatTy::F64) => {
- return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
- }
+ Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
+ Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
_ => {
- bug!("Can't cast 'Float' type into {:?}", cast_ty);
+ bug!("Can't cast 'Float' type into {}", cast_to.ty);
}
- }
+ };
+ Ok(ImmTy::from_scalar(val, cast_to))
}
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
pub fn ptr_to_ptr(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_any_ptr());
- assert!(cast_ty.is_unsafe_ptr());
+ assert!(cast_to.ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr).
- let dest_layout = self.layout_of(cast_ty)?;
- if dest_layout.size == src.layout.size {
+ if cast_to.size == src.layout.size {
// Thin or fat pointer that just has the ptr kind of the target type changed.
- return Ok(**src);
+ return Ok(ImmTy::from_immediate(**src, cast_to));
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size());
- assert_eq!(dest_layout.size, self.pointer_size());
+ assert_eq!(cast_to.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
return match **src {
- Immediate::ScalarPair(data, _) => Ok(data.into()),
+ Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
- "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+ "{:?} input to a fat-to-thin cast ({} -> {})",
*src,
src.layout.ty,
- cast_ty
+ cast_to.ty
),
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
@@ -229,10 +232,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn pointer_expose_address_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
- assert!(cast_ty.is_integral());
+ assert!(cast_to.ty.is_integral());
let scalar = src.to_scalar();
let ptr = scalar.to_pointer(self)?;
@@ -240,16 +243,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
};
- Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
}
pub fn pointer_from_exposed_address_cast(
&self,
src: &ImmTy<'tcx, M::Provenance>,
- cast_ty: Ty<'tcx>,
- ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ cast_to: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral());
- assert_matches!(cast_ty.kind(), ty::RawPtr(_));
+ assert_matches!(cast_to.ty.kind(), ty::RawPtr(_));
// First cast to usize.
let scalar = src.to_scalar();
@@ -258,12 +261,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?;
- Ok(Scalar::from_maybe_pointer(ptr, self).into())
+ Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
}
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
/// type (basically everything with a scalar layout) to int/float/char types.
- pub fn cast_from_int_like(
+ fn cast_from_int_like(
&self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>,
@@ -298,7 +301,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Casts to bool are not permitted by rustc, no need to handle them here.
- _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+ _ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
})
}
@@ -331,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// float -> f64
Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
// That's it.
- _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+ _ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
}
}
@@ -351,7 +354,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
(&ty::Array(_, length), &ty::Slice(_)) => {
- let ptr = self.read_scalar(src)?;
+ let ptr = self.read_pointer(src)?;
// u64 cast is from usize to u64, which is always good
let val = Immediate::new_slice(
ptr,
@@ -367,6 +370,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return self.write_immediate(*val, dest);
}
let (old_data, old_vptr) = val.to_scalar_pair();
+ let old_data = old_data.to_pointer(self)?;
let old_vptr = old_vptr.to_pointer(self)?;
let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
if old_trait != data_a.principal() {
@@ -378,7 +382,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(_, &ty::Dynamic(data, _, ty::Dyn)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
- let ptr = self.read_scalar(src)?;
+ let ptr = self.read_pointer(src)?;
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
self.write_immediate(val, dest)
}
@@ -389,7 +393,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span_bug!(
self.cur_span(),
- "invalid pointer unsizing {:?} -> {:?}",
+ "invalid pointer unsizing {} -> {}",
src.layout.ty,
cast_ty
)
@@ -403,28 +407,32 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: TyAndLayout<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+ trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
(&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
| (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
self.unsize_into_ptr(src, dest, *s, *c)
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
- assert_eq!(def_a, def_b);
+ assert_eq!(def_a, def_b); // implies same number of fields
- // unsizing of generic struct with pointer fields
- // Example: `Arc<T>` -> `Arc<Trait>`
- // here we need to increase the size of every &T thin ptr field to a fat ptr
+ // Unsizing of generic struct with pointer fields, like `Arc<T>` -> `Arc<Trait>`.
+ // There can be extra fields as long as they don't change their type or are 1-ZSTs.
+ // There might also be no field that actually needs unsizing.
+ let mut found_cast_field = false;
for i in 0..src.layout.fields.count() {
let cast_ty_field = cast_ty.field(self, i);
- if cast_ty_field.is_zst() {
- continue;
- }
let src_field = self.project_field(src, i)?;
let dst_field = self.project_field(dest, i)?;
- if src_field.layout.ty == cast_ty_field.ty {
+ if src_field.layout.is_1zst() && cast_ty_field.is_1zst() {
+ // Skip 1-ZST fields.
+ } else if src_field.layout.ty == cast_ty_field.ty {
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
} else {
+ if found_cast_field {
+ span_bug!(self.cur_span(), "unsize_into: more than one field to cast");
+ }
+ found_cast_field = true;
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
}
}
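// A minimal, standalone sketch of the pattern the cast hunks above move to: the
// helpers now return an `ImmTy`-style pair of value plus layout instead of a bare
// `Immediate`, so a result can no longer be written out under the wrong type. The
// `Layout`/`Typed`/`int_to_int` names below are hypothetical stand-ins, not rustc's
// actual interpreter types.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Layout { size_bytes: u64 }

#[derive(Clone, Copy, Debug)]
struct Typed<V> { value: V, layout: Layout }

impl<V> Typed<V> {
    // Mirrors the role of `ImmTy::from_scalar`: the layout travels with the value.
    fn new(value: V, layout: Layout) -> Self { Typed { value, layout } }
}

// An int-to-int cast returns the truncated value already tagged with the target layout.
fn int_to_int(src: Typed<u64>, cast_to: Layout) -> Typed<u64> {
    let mask = if cast_to.size_bytes >= 8 {
        u64::MAX
    } else {
        (1u64 << (cast_to.size_bytes * 8)) - 1
    };
    Typed::new(src.value & mask, cast_to)
}

fn main() {
    let src = Typed::new(0x1_0000_00FFu64, Layout { size_bytes: 8 });
    let res = int_to_int(src, Layout { size_bytes: 1 });
    assert_eq!(res.value, 0xFF);
    assert_eq!(res.layout.size_bytes, 1); // the result carries the cast-to layout
}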
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 6c35fb01a..49e01728f 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -76,7 +76,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
- let tag_val = self.binary_op(
+ let tag_val = self.wrapping_binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
@@ -153,19 +153,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to.
let index = match *tag_encoding {
TagEncoding::Direct => {
- let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
- let tag_bits = scalar
+ let tag_bits = tag_val
+ .to_scalar()
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
- let discr_val =
- self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
- let discr_bits = discr_val.assert_bits(discr_layout.size);
+ let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
+ let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() {
ty::Adt(adt, _) => {
@@ -208,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+ self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
@@ -247,9 +246,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
layout: TyAndLayout<'tcx>,
variant: VariantIdx,
- ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let discr_layout = self.layout_of(layout.ty.discriminant_ty(*self.tcx))?;
- Ok(match layout.ty.discriminant_for_variant(*self.tcx, variant) {
+ let discr_value = match layout.ty.discriminant_for_variant(*self.tcx, variant) {
Some(discr) => {
// This type actually has discriminants.
assert_eq!(discr.ty, discr_layout.ty);
@@ -260,6 +259,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert_eq!(variant.as_u32(), 0);
Scalar::from_uint(variant.as_u32(), discr_layout.size)
}
- })
+ };
+ Ok(ImmTy::from_scalar(discr_value, discr_layout))
}
}
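// A standalone sketch of the niche-tag arithmetic the discriminant hunks above switch
// to `wrapping_binary_op`: encoding computes `(variant_index - niche_variants_start)
// + niche_start` with wrapping arithmetic in the tag's width, and decoding inverts it
// the same way. The u8 tag width and the function names are assumptions for
// illustration only.
fn encode_niche_tag(variant_index: u8, niche_variants_start: u8, niche_start: u8) -> u8 {
    let relative = variant_index.wrapping_sub(niche_variants_start);
    relative.wrapping_add(niche_start)
}

fn decode_niche_tag(tag: u8, niche_variants_start: u8, niche_start: u8) -> u8 {
    let relative = tag.wrapping_sub(niche_start);
    relative.wrapping_add(niche_variants_start)
}

fn main() {
    // A wrap-around case: the niche starts near the top of the u8 range.
    let tag = encode_niche_tag(3, 1, 0xFE);
    assert_eq!(tag, 0x00); // (3 - 1) + 0xFE wraps to 0
    assert_eq!(decode_niche_tag(tag, 1, 0xFE), 3);
}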
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 3ac6f07e8..af7dfbef2 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -7,13 +7,13 @@ use hir::CRATE_HIR_ID;
use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
use rustc_index::IndexVec;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ErrorHandled, InterpError, InvalidMetaKind, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
};
-use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_session::Limit;
use rustc_span::Span;
@@ -21,12 +21,12 @@ use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayou
use super::{
AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
- MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
- Scalar, StackPopJump,
+ MemPlaceMeta, Memory, MemoryKind, OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic,
+ Projectable, Provenance, Scalar, StackPopJump,
};
-use crate::errors::{self, ErroneousConstUsed};
-use crate::fluent_generated as fluent;
+use crate::errors;
use crate::util;
+use crate::{fluent_generated as fluent, ReportErrorExt};
pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
/// Stores the `Machine` instance.
@@ -155,16 +155,26 @@ pub enum StackPopCleanup {
}
/// State of a local variable including a memoized layout
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
- pub value: LocalValue<Prov>,
- /// Don't modify if `Some`, this is only used to prevent computing the layout twice
- pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+ value: LocalValue<Prov>,
+ /// Don't modify if `Some`, this is only used to prevent computing the layout twice.
+ /// Avoids computing the layout of locals that are never actually initialized.
+ layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("LocalState")
+ .field("value", &self.value)
+ .field("ty", &self.layout.get().map(|l| l.ty))
+ .finish()
+ }
}
/// Current value of a local variable
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
-pub enum LocalValue<Prov: Provenance = AllocId> {
+pub(super) enum LocalValue<Prov: Provenance = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// A normal, live local.
@@ -175,10 +185,27 @@ pub enum LocalValue<Prov: Provenance = AllocId> {
Live(Operand<Prov>),
}
-impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
+ pub fn make_live_uninit(&mut self) {
+ self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ }
+
+ /// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
+ /// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
+ /// private.
+ pub fn as_mplace_or_imm(
+ &self,
+ ) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
+ match self.value {
+ LocalValue::Dead => None,
+ LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
+ LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
+ }
+ }
+
/// Read the local's value or error if the local is not yet live or not live anymore.
- #[inline]
- pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+ #[inline(always)]
+ pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@@ -188,10 +215,10 @@ impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
/// Overwrite the local. If the local can be overwritten in place, return a reference
/// to do so; otherwise return the `MemPlace` to consult instead.
///
- /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
- /// anywhere else. You may be invalidating machine invariants if you do!
- #[inline]
- pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+ /// Note: Before calling this, call the `before_access_local_mut` machine hook! You may be
+ /// invalidating machine invariants otherwise!
+ #[inline(always)]
+ pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
@@ -357,7 +384,7 @@ pub(super) fn mir_assign_valid_types<'tcx>(
// all normal lifetimes are erased, higher-ranked types with their
// late-bound lifetimes are still around and can lead to type
// differences.
- if util::is_subtype(tcx, param_env, src.ty, dest.ty) {
+ if util::relate_types(tcx, param_env, Variance::Covariant, src.ty, dest.ty) {
// Make sure the layout is equal, too -- just to be safe. Miri really
// needs layout equality. For performance reason we skip this check when
// the types are equal. Equal types *can* have different layouts when
@@ -389,7 +416,7 @@ pub(super) fn from_known_layout<'tcx>(
if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
span_bug!(
tcx.span,
- "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+ "expected type differs from actual type.\nexpected: {}\nactual: {}",
known_layout.ty,
check_layout.ty,
);
@@ -432,6 +459,27 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.map_or(CRATE_HIR_ID, |def_id| self.tcx.hir().local_def_id_to_hir_id(def_id))
}
+ /// Turn the given error into a human-readable string. Expects the string to be printed, so if
+ /// `RUSTC_CTFE_BACKTRACE` is set this will show a backtrace of the rustc internals that
+ /// triggered the error.
+ ///
+ /// This is NOT the preferred way to render an error; use `report` from `const_eval` instead.
+ /// However, this is useful when error messages appear in ICEs.
+ pub fn format_error(&self, e: InterpErrorInfo<'tcx>) -> String {
+ let (e, backtrace) = e.into_parts();
+ backtrace.print_backtrace();
+ // FIXME(fee1-dead), HACK: we want to use the error as the title, so we just extract the
+ // label and arguments from the InterpError.
+ let handler = &self.tcx.sess.parse_sess.span_diagnostic;
+ #[allow(rustc::untranslatable_diagnostic)]
+ let mut diag = self.tcx.sess.struct_allow("");
+ let msg = e.diagnostic_message();
+ e.add_args(handler, &mut diag);
+ let s = handler.eagerly_translate_to_string(msg, diag.args());
+ diag.cancel();
+ s
+ }
+
#[inline(always)]
pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
M::stack(self)
@@ -462,7 +510,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
#[inline(always)]
- pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+ pub fn body(&self) -> &'mir mir::Body<'tcx> {
self.frame().body
}
@@ -508,7 +556,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
>(
&self,
value: T,
- ) -> Result<T, InterpError<'tcx>> {
+ ) -> Result<T, ErrorHandled> {
self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
}
@@ -518,15 +566,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
value: T,
- ) -> Result<T, InterpError<'tcx>> {
+ ) -> Result<T, ErrorHandled> {
frame
.instance
- .try_subst_mir_and_normalize_erasing_regions(
+ .try_instantiate_mir_and_normalize_erasing_regions(
*self.tcx,
self.param_env,
ty::EarlyBinder::bind(value),
)
- .map_err(|_| err_inval!(TooGeneric))
+ .map_err(|_| ErrorHandled::TooGeneric(self.cur_span()))
}
/// The `args` are assumed to already be in our interpreter "universe" (param_env).
@@ -664,7 +712,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Foreign(_) => Ok(None),
- _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+ _ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
}
}
#[inline]
@@ -672,7 +720,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Option<(Size, Align)>> {
- self.size_and_align_of(&mplace.meta, &mplace.layout)
+ self.size_and_align_of(&mplace.meta(), &mplace.layout)
}
#[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
@@ -684,15 +732,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
+ let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+ let locals = IndexVec::from_elem(dead_local, &body.local_decls);
// First push a stack frame so we have access to the local args
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place: return_place.clone(),
- // empty local array, we fill it in below, after we are inside the stack frame and
- // all methods actually know about the frame
- locals: IndexVec::new(),
+ locals,
instance,
tracing_span: SpanGuard::new(),
extra: (),
@@ -701,25 +749,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.stack_mut().push(frame);
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
- for ct in &body.required_consts {
- let span = ct.span;
- let ct = self.subst_from_current_frame_and_normalize_erasing_regions(ct.literal)?;
- self.eval_mir_constant(&ct, Some(span), None)?;
+ if M::POST_MONO_CHECKS {
+ // `ctfe_query` does some error message decoration that we want to be in effect here.
+ self.ctfe_query(None, |tcx| {
+ body.post_mono_checks(*tcx, self.param_env, |c| {
+ self.subst_from_current_frame_and_normalize_erasing_regions(c)
+ })
+ })?;
}
- // Most locals are initially dead.
- let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
- let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
-
- // Now mark those locals as live that have no `Storage*` annotations.
- let always_live = always_storage_live_locals(self.body());
- for local in locals.indices() {
- if always_live.contains(local) {
- locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
- }
- }
// done
- self.frame_mut().locals = locals;
M::after_stack_push(self)?;
self.frame_mut().loc = Left(mir::Location::START);
@@ -756,6 +795,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
///
/// If `target` is `UnwindAction::Unreachable`, that indicates the function does not allow
/// unwinding, and doing so is UB.
+ #[cold] // usually we have normal returns, not unwinding
pub fn unwind_to_block(&mut self, target: mir::UnwindAction) -> InterpResult<'tcx> {
self.frame_mut().loc = match target {
mir::UnwindAction::Cleanup(block) => Left(mir::Location { block, statement_index: 0 }),
@@ -763,9 +803,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
mir::UnwindAction::Unreachable => {
throw_ub_custom!(fluent::const_eval_unreachable_unwind);
}
- mir::UnwindAction::Terminate => {
+ mir::UnwindAction::Terminate(reason) => {
self.frame_mut().loc = Right(self.frame_mut().body.span);
- M::abort(self, "panic in a function that cannot unwind".to_owned())?;
+ M::unwind_terminate(self, reason)?;
+ // This might have pushed a new stack frame, or it terminated execution.
+ // Either way, `loc` will not be updated.
+ return Ok(());
}
};
Ok(())
@@ -812,7 +855,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.expect("return place should always be live");
let dest = self.frame().return_place.clone();
let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
- trace!("return value: {:?}", self.dump_place(*dest));
+ trace!("return value: {:?}", self.dump_place(&dest));
// We delay actually short-circuiting on this error until *after* the stack frame is
// popped, since we want this error to be attributed to the caller, whose type defines
// this transmute.
@@ -865,6 +908,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
panic!("encountered StackPopCleanup::Root when unwinding!")
}
};
+ // This must be the very last thing that happens, since it can in fact push a new stack frame.
self.unwind_to_block(unwind)
} else {
// Follow the normal return edge.
@@ -881,12 +925,95 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Mark a storage as live, killing the previous content.
- pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
- assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+ /// In the current stack frame, mark all locals as live that are not arguments and don't have
+ /// `Storage*` annotations (this includes the return place).
+ pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
+ self.storage_live(mir::RETURN_PLACE)?;
+
+ let body = self.body();
+ let always_live = always_storage_live_locals(body);
+ for local in body.vars_and_temps_iter() {
+ if always_live.contains(local) {
+ self.storage_live(local)?;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn storage_live_dyn(
+ &mut self,
+ local: mir::Local,
+ meta: MemPlaceMeta<M::Provenance>,
+ ) -> InterpResult<'tcx> {
trace!("{:?} is now live", local);
- let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ // We avoid `ty.is_trivially_sized` since that (a) cannot assume WF, so it recurses through
+ // all fields of a tuple, and (b) does something expensive for ADTs.
+ fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_) => true,
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+ ty::Tuple(tys) => tys.last().iter().all(|ty| is_very_trivially_sized(**ty)),
+
+ // We don't want to do any queries, so there is not much we can do with ADTs.
+ ty::Adt(..) => false,
+
+ ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
+
+ ty::Infer(ty::TyVar(_)) => false,
+
+ ty::Bound(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
+ }
+ }
+ }
+
+ // This is a hot function, so we avoid computing the layout when possible.
+ // `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
+ let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
+ None
+ } else {
+ // We need the layout.
+ let layout = self.layout_of_local(self.frame(), local, None)?;
+ if layout.is_sized() { None } else { Some(layout) }
+ };
+
+ let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
+ if !meta.has_meta() {
+ throw_unsup!(UnsizedLocal);
+ }
+ // Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
+ let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
+ Operand::Indirect(*dest_place.mplace())
+ } else {
+ assert!(!meta.has_meta()); // we're dropping the metadata
+ // Just make this an efficient immediate.
+ // Note that not calling `layout_of` here does have one real consequence:
+ // if the type is too big, we'll only notice this when the local is actually initialized,
+ // which is a bit too late -- we should ideally notice this already here, when the memory
+ // is conceptually allocated. But given how rare that error is and that this is a hot function,
+ // we accept this downside for now.
+ Operand::Immediate(Immediate::Uninit)
+ });
+
// StorageLive expects the local to be dead, and marks it live.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
if !matches!(old, LocalValue::Dead) {
@@ -895,6 +1022,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(())
}
+ /// Mark a storage as live, killing the previous content.
+ #[inline(always)]
+ pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ self.storage_live_dyn(local, MemPlaceMeta::None)
+ }
+
pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
@@ -926,28 +1059,19 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
span: Option<Span>,
query: impl FnOnce(TyCtxtAt<'tcx>) -> Result<T, ErrorHandled>,
- ) -> InterpResult<'tcx, T> {
+ ) -> Result<T, ErrorHandled> {
// Use a precise span for better cycle errors.
query(self.tcx.at(span.unwrap_or_else(|| self.cur_span()))).map_err(|err| {
- match err {
- ErrorHandled::Reported(err) => {
- if !err.is_tainted_by_errors() && let Some(span) = span {
- // To make it easier to figure out where this error comes from, also add a note at the current location.
- self.tcx.sess.emit_note(ErroneousConstUsed { span });
- }
- err_inval!(AlreadyReported(err))
- }
- ErrorHandled::TooGeneric => err_inval!(TooGeneric),
- }
- .into()
+ err.emit_note(*self.tcx);
+ err
})
}
pub fn eval_global(
&self,
- gid: GlobalId<'tcx>,
- span: Option<Span>,
+ instance: ty::Instance<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let gid = GlobalId { instance, promoted: None };
// For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
// and thus don't care about the parameter environment. While we could just use
// `self.param_env`, that would mean we invoke the query to evaluate the static
@@ -958,13 +1082,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} else {
self.param_env
};
- let val = self.ctfe_query(span, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
+ let val = self.ctfe_query(None, |tcx| tcx.eval_to_allocation_raw(param_env.and(gid)))?;
self.raw_const_to_mplace(val)
}
+ pub fn eval_mir_constant(
+ &self,
+ val: &mir::Const<'tcx>,
+ span: Option<Span>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let const_val = self.ctfe_query(span, |tcx| val.eval(*tcx, self.param_env, span))?;
+ self.const_val_to_op(const_val, val.ty(), layout)
+ }
+
#[must_use]
- pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
- PlacePrinter { ecx: self, place }
+ pub fn dump_place(
+ &self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+ PlacePrinter { ecx: self, place: *place.place() }
}
#[must_use]
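// A standalone sketch of the layout memoization that `LocalState` keeps behind its now
// private `layout: Cell<Option<TyAndLayout>>` field: the layout is computed at most
// once, and only for locals that are actually touched. `Local`, `compute_layout`, and
// the u64 "layout" are hypothetical stand-ins for the real interpreter types.
use std::cell::Cell;

struct Local {
    ty_size_hint: u64,
    // `None` until the layout has been computed once; never modified afterwards.
    layout: Cell<Option<u64>>,
}

impl Local {
    fn layout(&self) -> u64 {
        if let Some(l) = self.layout.get() {
            return l; // memoized: skip the expensive computation
        }
        let l = compute_layout(self.ty_size_hint);
        self.layout.set(Some(l));
        l
    }
}

fn compute_layout(size_hint: u64) -> u64 {
    // Stand-in for a real layout query; just round the size up to 8 bytes.
    (size_hint + 7) / 8 * 8
}

fn main() {
    let local = Local { ty_size_hint: 12, layout: Cell::new(None) };
    assert_eq!(local.layout(), 16);
    assert_eq!(local.layout(), 16); // the second call hits the memoized value
}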
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index 910c3ca5d..8c0009cfd 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -24,7 +24,7 @@ use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
use rustc_ast::Mutability;
use super::{
- AllocId, Allocation, ConstAllocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy,
+ AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, Projectable,
ValueVisitor,
};
use crate::const_eval;
@@ -177,7 +177,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
if let ty::Dynamic(_, _, ty::Dyn) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
- let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
+ let ptr = mplace.meta().unwrap_meta().to_pointer(&tcx)?;
if let Some(alloc_id) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable.
@@ -191,7 +191,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
}
// Check if we have encountered this pointer+layout combination before.
// Only recurse for allocation-backed pointers.
- if let Some(alloc_id) = mplace.ptr.provenance {
+ if let Some(alloc_id) = mplace.ptr().provenance {
// Compute the mode with which we intern this. Our goal here is to make as many
// statics as we can immutable so they can be placed in read-only memory by LLVM.
let ref_mode = match self.mode {
@@ -267,7 +267,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
// If there is no provenance in this allocation, it does not contain references
// that point to another allocation, and we can avoid the interning walk.
- if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr(), size, align)? {
if !alloc.has_provenance() {
return Ok(false);
}
@@ -353,7 +353,7 @@ pub fn intern_const_alloc_recursive<
leftover_allocations,
// The outermost allocation must exist, because we allocated it with
// `Memory::allocate`.
- ret.ptr.provenance.unwrap(),
+ ret.ptr().provenance.unwrap(),
base_intern_mode,
Some(ret.layout.ty),
);
@@ -378,7 +378,8 @@ pub fn intern_const_alloc_recursive<
ecx.tcx.sess.delay_span_bug(
ecx.tcx.span,
format!(
- "error during interning should later cause validation failure: {error:?}"
+ "error during interning should later cause validation failure: {}",
+ ecx.format_error(error),
),
);
}
@@ -454,7 +455,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
{
/// A helper function that allocates memory for the layout given and gives you access to mutate
/// it. Once your own mutation code is done, the backing `Allocation` is removed from the
- /// current `Memory` and returned.
+ /// current `Memory` and interned as read-only into the global memory.
pub fn intern_with_temp_alloc(
&mut self,
layout: TyAndLayout<'tcx>,
@@ -462,11 +463,15 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
&mut InterpCx<'mir, 'tcx, M>,
&PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ()>,
- ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
+ ) -> InterpResult<'tcx, AllocId> {
+ // `allocate` picks a fresh AllocId that we will associate with its data below.
let dest = self.allocate(layout, MemoryKind::Stack)?;
f(self, &dest.clone().into())?;
- let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
+ let mut alloc = self.memory.alloc_map.remove(&dest.ptr().provenance.unwrap()).unwrap().1;
alloc.mutability = Mutability::Not;
- Ok(self.tcx.mk_const_alloc(alloc))
+ let alloc = self.tcx.mk_const_alloc(alloc);
+ let alloc_id = dest.ptr().provenance.unwrap(); // this was just allocated, it must have provenance
+ self.tcx.set_alloc_id_memory(alloc_id, alloc);
+ Ok(alloc_id)
}
}
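// A standalone sketch of the shape `intern_with_temp_alloc` takes after the hunk
// above: mutate a temporary buffer, freeze it, register it globally under a fresh id,
// and hand back only the id instead of the raw allocation. `Interner` and its fields
// are illustrative stand-ins, not rustc's allocation machinery.
use std::collections::HashMap;

struct Interner {
    next_id: u64,
    table: HashMap<u64, Box<[u8]>>, // frozen, read-only contents
}

impl Interner {
    fn intern_with_temp(&mut self, f: impl FnOnce(&mut Vec<u8>)) -> u64 {
        let mut buf = Vec::new();
        f(&mut buf); // the caller mutates the temporary allocation
        let id = self.next_id;
        self.next_id += 1;
        self.table.insert(id, buf.into_boxed_slice()); // freeze and register globally
        id // only the id escapes, like the returned `AllocId`
    }
}

fn main() {
    let mut interner = Interner { next_id: 0, table: HashMap::new() };
    let id = interner.intern_with_temp(|buf| buf.extend_from_slice(b"hello"));
    assert_eq!(&interner.table[&id][..], b"hello");
}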
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index f22cd919c..2c0ba9b26 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -5,10 +5,8 @@
use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
self,
- interpret::{
- Allocation, ConstAllocation, ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar,
- },
- BinOp, NonDivergingIntrinsic,
+ interpret::{Allocation, ConstAllocation, GlobalId, InterpResult, PointerArithmetic, Scalar},
+ BinOp, ConstValue, NonDivergingIntrinsic,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf as _, ValidityRequirement};
@@ -64,7 +62,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
sym::type_name => {
ensure_monomorphic_enough(tcx, tp_ty)?;
let alloc = alloc_type_name(tcx, tp_ty);
- ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
+ ConstValue::Slice { data: alloc, meta: alloc.inner().size().bytes() }
}
sym::needs_drop => {
ensure_monomorphic_enough(tcx, tp_ty)?;
@@ -102,8 +100,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(_, _)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_) => ConstValue::from_target_usize(0u64, &tcx),
@@ -125,15 +122,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, bool> {
let instance_args = instance.args;
let intrinsic_name = self.tcx.item_name(instance.def_id());
-
- // First handle intrinsics without return place.
- let ret = match ret {
- None => match intrinsic_name {
- sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
- // Unsupported diverging intrinsic.
- _ => return Ok(false),
- },
- Some(p) => p,
+ let Some(ret) = ret else {
+ // We don't support any intrinsic without return place.
+ return Ok(false);
};
match intrinsic_name {
@@ -228,7 +219,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let place = self.deref_pointer(&args[0])?;
let variant = self.read_discriminant(&place)?;
let discr = self.discriminant_for_variant(place.layout, variant)?;
- self.write_scalar(discr, dest)?;
+ self.write_immediate(*discr, dest)?;
}
sym::exact_div => {
let l = self.read_immediate(&args[0])?;
@@ -315,7 +306,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dist = {
// Addresses are unsigned, so this is a `usize` computation. We have to do the
// overflow check separately anyway.
- let (val, overflowed, _ty) = {
+ let (val, overflowed) = {
let a_offset = ImmTy::from_uint(a_offset, usize_layout);
let b_offset = ImmTy::from_uint(b_offset, usize_layout);
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@@ -332,7 +323,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart.
- let dist = val.to_target_isize(self)?;
+ let dist = val.to_scalar().to_target_isize(self)?;
if dist >= 0 {
throw_ub_custom!(
fluent::const_eval_offset_from_underflow,
@@ -342,7 +333,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dist
} else {
// b >= a
- let dist = val.to_target_isize(self)?;
+ let dist = val.to_scalar().to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
@@ -410,7 +401,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ValidityRequirement::Uninit => bug!("assert_uninit_valid doesn't exist"),
};
- M::abort(self, msg)?;
+ M::panic_nounwind(self, &msg)?;
+ // Skip the `go_to_block` at the end.
+ return Ok(true);
}
}
sym::simd_insert => {
@@ -470,7 +463,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
_ => return Ok(false),
}
- trace!("{:?}", self.dump_place(**dest));
+ trace!("{:?}", self.dump_place(dest));
self.go_to_block(ret);
Ok(true)
}
@@ -510,9 +503,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
- let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+ let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow.
- if res.assert_bits(a.layout.size) != 0 {
+ if res.to_scalar().assert_bits(a.layout.size) != 0 {
throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"),
@@ -530,7 +523,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
- let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
+ let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
let num_bits = size.bits();
@@ -562,7 +555,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
} else {
- val
+ val.to_scalar()
})
}
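// A standalone sketch of the saturating-arithmetic logic in the intrinsic hunk above:
// perform the overflowing operation first and, on overflow, clamp to the type's
// extreme depending on the direction. Plain i8 is used here instead of interpreter
// immediates; the standard library's `saturating_add` does the same thing natively.
fn saturating_add_i8(l: i8, r: i8) -> i8 {
    let (val, overflowed) = l.overflowing_add(r);
    if overflowed {
        // Overflow in the positive direction saturates to MAX, negative to MIN.
        if r >= 0 { i8::MAX } else { i8::MIN }
    } else {
        val
    }
}

fn main() {
    assert_eq!(saturating_add_i8(120, 10), i8::MAX);
    assert_eq!(saturating_add_i8(-120, -10), i8::MIN);
    assert_eq!(saturating_add_i8(100, 10), 110);
    // Matches the standard library behaviour.
    assert_eq!(saturating_add_i8(120, 10), 120i8.saturating_add(10));
}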
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index e101785b6..aaa674a59 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -9,7 +9,7 @@ use std::hash::Hash;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
@@ -18,7 +18,7 @@ use crate::const_eval::CheckAlignment;
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
- InterpResult, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
+ InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
};
/// Data returned by Machine::stack_pop,
@@ -130,6 +130,9 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Should the machine panic on allocation failures?
const PANIC_ON_ALLOC_FAIL: bool;
+ /// Should post-monomorphization checks be run when a stack frame is pushed?
+ const POST_MONO_CHECKS: bool = true;
+
/// Whether memory accesses should be alignment-checked.
fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment;
@@ -218,10 +221,14 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
unwind: mir::UnwindAction,
) -> InterpResult<'tcx>;
- /// Called to abort evaluation.
- fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
- throw_unsup_format!("aborting execution is not supported")
- }
+ /// Called to trigger a non-unwinding panic.
+ fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: &str) -> InterpResult<'tcx>;
+
+ /// Called when unwinding reached a state where execution should be terminated.
+ fn unwind_terminate(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ reason: mir::UnwindTerminateReason,
+ ) -> InterpResult<'tcx>;
/// Called for all binary operations where the LHS has pointer type.
///
@@ -231,24 +238,24 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
- /// Called to write the specified `local` from the `frame`.
+ /// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
/// for ZST reads.
///
/// Due to borrow checker trouble, we indicate the `frame` as an index rather than an `&mut
/// Frame`.
- #[inline]
- fn access_local_mut<'a>(
- ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
- frame: usize,
- local: mir::Local,
- ) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
+ #[inline(always)]
+ fn before_access_local_mut<'a>(
+ _ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ _frame: usize,
+ _local: mir::Local,
+ ) -> InterpResult<'tcx>
where
'tcx: 'mir,
{
- ecx.stack_mut()[frame].locals[local].access_mut()
+ Ok(())
}
/// Called before a basic block terminator is executed.
@@ -461,6 +468,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
/// The `locals` have already been destroyed!
+ #[inline(always)]
fn after_stack_pop(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
@@ -470,6 +478,18 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
assert!(!unwinding);
Ok(StackPopJump::Normal)
}
+
+ /// Called immediately after actual memory was allocated for a local
+ /// but before the local's stack frame is updated to point to that memory.
+ #[inline(always)]
+ fn after_local_allocated(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _frame: usize,
+ _local: mir::Local,
+ _mplace: &MPlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
}
/// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
@@ -500,6 +520,14 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
}
#[inline(always)]
+ fn unwind_terminate(
+ _ecx: &mut InterpCx<$mir, $tcx, Self>,
+ _reason: mir::UnwindTerminateReason,
+ ) -> InterpResult<$tcx> {
+ unreachable!("unwinding cannot happen during compile-time evaluation")
+ }
+
+ #[inline(always)]
fn call_extra_fn(
_ecx: &mut InterpCx<$mir, $tcx, Self>,
fn_val: !,
@@ -527,7 +555,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
def_id: DefId,
) -> InterpResult<$tcx, Pointer> {
// Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
- Ok(Pointer::new(ecx.tcx.create_static_alloc(def_id), Size::ZERO))
+ Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id), Size::ZERO))
}
#[inline(always)]
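// A standalone sketch of the hook style the `Machine` hunks above move towards:
// notification hooks with no-op defaults (`before_access_local_mut`,
// `after_local_allocated`) plus an opt-out associated const (`POST_MONO_CHECKS`).
// The `Machine` trait below uses hypothetical names and is not rustc's trait.
trait Machine {
    // Opt-out switch with a default, analogous to `POST_MONO_CHECKS`.
    const EXTRA_CHECKS: bool = true;

    // Notification hook; the default does nothing, implementors may observe the write.
    fn before_local_write(&mut self, _local: usize) -> Result<(), String> {
        Ok(())
    }
}

struct Plain;
impl Machine for Plain {}

struct Logging { writes: Vec<usize> }
impl Machine for Logging {
    const EXTRA_CHECKS: bool = false;
    fn before_local_write(&mut self, local: usize) -> Result<(), String> {
        self.writes.push(local); // record the access instead of handing out a reference
        Ok(())
    }
}

fn main() {
    let mut m = Logging { writes: Vec::new() };
    m.before_local_write(3).unwrap();
    assert_eq!(m.writes, vec![3]);
    assert!(Plain::EXTRA_CHECKS);
    assert!(!Logging::EXTRA_CHECKS);
}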
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 11bffedf5..436c4d521 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -176,12 +176,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
M::adjust_alloc_base_pointer(self, ptr)
}
- pub fn create_fn_alloc_ptr(
- &mut self,
- fn_val: FnVal<'tcx, M::ExtraFnVal>,
- ) -> Pointer<M::Provenance> {
+ pub fn fn_ptr(&mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>) -> Pointer<M::Provenance> {
let id = match fn_val {
- FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+ FnVal::Instance(instance) => self.tcx.reserve_and_set_fn_alloc(instance),
FnVal::Other(extra) => {
// FIXME(RalfJung): Should we have a cache here?
let id = self.tcx.reserve_alloc_id();
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index b0b553c45..69eb22028 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -20,16 +20,21 @@ mod visitor;
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
-pub use self::eval_context::{Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup};
+pub use self::eval_context::{Frame, FrameInfo, InterpCx, StackPopCleanup};
pub use self::intern::{intern_const_alloc_recursive, InternKind};
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
-pub use self::operand::{ImmTy, Immediate, OpTy, Operand, Readable};
-pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy, Writeable};
+pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
+pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
pub use self::projection::Projectable;
pub use self::terminator::FnArg;
pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::ValueVisitor;
+use self::{
+ operand::Operand,
+ place::{MemPlace, Place},
+};
+
pub(crate) use self::intrinsics::eval_nullary_intrinsic;
use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 6e57a56b4..a32ea204f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -8,15 +8,13 @@ use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
-use rustc_middle::ty::{ConstInt, Ty, ValTree};
+use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
-use rustc_span::Span;
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
use super::{
- alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
- InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer,
- Projectable, Provenance, Scalar,
+ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, Frame, InterpCx, InterpResult,
+ MPlaceTy, Machine, MemPlace, MemPlaceMeta, PlaceTy, Pointer, Projectable, Provenance, Scalar,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -33,7 +31,7 @@ pub enum Immediate<Prov: Provenance = AllocId> {
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
/// `Scalar::Initialized`).
ScalarPair(Scalar<Prov>, Scalar<Prov>),
- /// A value of fully uninitialized memory. Can have arbitrary size and layout.
+ /// A value of fully uninitialized memory. Can have arbitrary size and layout, but must be sized.
Uninit,
}
@@ -45,24 +43,30 @@ impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
}
impl<Prov: Provenance> Immediate<Prov> {
- pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_pointer(p, cx))
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(Scalar::from_pointer(ptr, cx))
}
- pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
- Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(Scalar::from_maybe_pointer(ptr, cx))
}
- pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
- Immediate::ScalarPair(val, Scalar::from_target_usize(len, cx))
+ pub fn new_slice(ptr: Pointer<Option<Prov>>, len: u64, cx: &impl HasDataLayout) -> Self {
+ Immediate::ScalarPair(
+ Scalar::from_maybe_pointer(ptr, cx),
+ Scalar::from_target_usize(len, cx),
+ )
}
pub fn new_dyn_trait(
- val: Scalar<Prov>,
+ val: Pointer<Option<Prov>>,
vtable: Pointer<Option<Prov>>,
cx: &impl HasDataLayout,
) -> Self {
- Immediate::ScalarPair(val, Scalar::from_maybe_pointer(vtable, cx))
+ Immediate::ScalarPair(
+ Scalar::from_maybe_pointer(val, cx),
+ Scalar::from_maybe_pointer(vtable, cx),
+ )
}
#[inline]
@@ -88,7 +92,7 @@ impl<Prov: Provenance> Immediate<Prov> {
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
-#[derive(Clone, Debug)]
+#[derive(Clone)]
pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
imm: Immediate<Prov>,
pub layout: TyAndLayout<'tcx>,
@@ -134,6 +138,16 @@ impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
}
}
+impl<Prov: Provenance> std::fmt::Debug for ImmTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("ImmTy")
+ .field("imm", &self.imm)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
type Target = Immediate<Prov>;
#[inline(always)]
@@ -142,64 +156,30 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
}
}
-/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
-/// or still in memory. The latter is an optimization, to delay reading that chunk of
-/// memory and to avoid having to store arbitrary-sized data here.
-#[derive(Copy, Clone, Debug)]
-pub enum Operand<Prov: Provenance = AllocId> {
- Immediate(Immediate<Prov>),
- Indirect(MemPlace<Prov>),
-}
-
-#[derive(Clone, Debug)]
-pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
- op: Operand<Prov>, // Keep this private; it helps enforce invariants.
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for an `OpTy`!
- /// `None` means "alignment does not matter since this is a by-value operand"
- /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
- /// Also CTFE ignores alignment anyway, so this is for Miri only.
- pub align: Option<Align>,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
- type Target = Operand<Prov>;
- #[inline(always)]
- fn deref(&self) -> &Operand<Prov> {
- &self.op
- }
-}
-
-impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
- }
-}
-
-impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
- #[inline(always)]
- fn from(val: ImmTy<'tcx, Prov>) -> Self {
- OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
- }
-}
-
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
ImmTy { imm: val.into(), layout }
}
- #[inline]
+ #[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(
+ match (imm, layout.abi) {
+ (Immediate::Scalar(..), Abi::Scalar(..)) => true,
+ (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ (Immediate::Uninit, _) if layout.is_sized() => true,
+ _ => false,
+ },
+ "immediate {imm:?} does not fit to layout {layout:?}",
+ );
ImmTy { imm, layout }
}
#[inline]
pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+ debug_assert!(layout.is_sized(), "immediates must be sized");
ImmTy { imm: Immediate::Uninit, layout }
}
@@ -223,6 +203,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
}
#[inline]
+ pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
+ let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
+ Self::from_scalar(Scalar::from_bool(b), layout)
+ }
+
+ #[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
let int = self.to_scalar().assert_int();
@@ -239,6 +225,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
// if the entire value is uninit, then so is the field (can happen in ConstProp)
(Immediate::Uninit, _) => Immediate::Uninit,
// the field contains no information, can be left uninit
+ // (a Scalar/ScalarPair can even contain an aligned ZST, not just a 1-ZST)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
@@ -290,23 +277,21 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
- Ok(MemPlaceMeta::None)
+ #[inline(always)]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ debug_assert!(self.layout.is_sized()); // unsized ImmTy can only exist temporarily and should never reach this here
+ MemPlaceMeta::None
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
- Ok(self.offset_(offset, layout, cx))
+ Ok(self.offset_(offset, layout, ecx))
}
fn to_op<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
@@ -317,49 +302,95 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
}
}
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Operand<Prov: Provenance = AllocId> {
+ Immediate(Immediate<Prov>),
+ Indirect(MemPlace<Prov>),
+}
+
+#[derive(Clone)]
+pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
+ op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for an `OpTy`!
+ /// `None` means "alignment does not matter since this is a by-value operand"
+ /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
+ /// Also CTFE ignores alignment anyway, so this is for Miri only.
+ pub align: Option<Align>,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for OpTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("OpTy")
+ .field("op", &self.op)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(val: ImmTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ OpTy {
+ op: Operand::Indirect(*mplace.mplace()),
+ layout: mplace.layout,
+ align: Some(mplace.align),
+ }
+ }
+}
+
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
- // Provided as inherent method since it doesn't need the `ecx` of `Projectable::meta`.
- pub fn meta(&self) -> InterpResult<'tcx, MemPlaceMeta<Prov>> {
- Ok(if self.layout.is_unsized() {
- if matches!(self.op, Operand::Immediate(_)) {
- // Unsized immediate OpTy cannot occur. We create a MemPlace for all unsized locals during argument passing.
- // However, ConstProp doesn't do that, so we can run into this nonsense situation.
- throw_inval!(ConstPropNonsense);
- }
- // There are no unsized immediates.
- self.assert_mem_place().meta
- } else {
- MemPlaceMeta::None
- })
+ #[inline(always)]
+ pub(super) fn op(&self) -> &Operand<Prov> {
+ &self.op
}
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- self.meta()
+ #[inline]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ match self.as_mplace_or_imm() {
+ Left(mplace) => mplace.meta(),
+ Right(_) => {
+ debug_assert!(self.layout.is_sized(), "unsized immediates are not a thing");
+ MemPlaceMeta::None
+ }
+ }
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
- Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+ Left(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, ecx)?.into()),
Right(imm) => {
- assert!(!meta.has_meta()); // no place to store metadata here
+ debug_assert!(layout.is_sized(), "unsized immediates are not a thing");
+ assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
- Ok(imm.offset(offset, layout, cx)?.into())
+ Ok(imm.offset_(offset, layout, ecx).into())
}
}
}
@@ -372,18 +403,19 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for OpTy<'tcx, Pr
}
}
+/// The `Readable` trait describes interpreter values that one can read from.
pub trait Readable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>>;
}
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for OpTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
self.as_mplace_or_imm()
}
}
-impl<'tcx, Prov: Provenance + 'static> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Readable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
Left(self.clone())
@@ -430,7 +462,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(Size::ZERO, size),
/*read_provenance*/ matches!(s, abi::Pointer(_)),
)?;
- Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
+ Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
@@ -450,7 +482,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(b_offset, b_size),
/*read_provenance*/ matches!(b, abi::Pointer(_)),
)?;
- Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
+ Some(ImmTy::from_immediate(Immediate::ScalarPair(a_val, b_val), mplace.layout))
}
_ => {
// Neither a scalar nor scalar pair.
@@ -496,11 +528,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
) {
- span_bug!(
- self.cur_span(),
- "primitive read not possible for type: {:?}",
- op.layout().ty
- );
+ span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
@@ -545,7 +573,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
- let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?;
+ let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
Ok(str)
}
@@ -587,6 +615,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
let layout = self.layout_of_local(frame, local, layout)?;
let op = *frame.locals[local].access()?;
+ if matches!(op, Operand::Immediate(_)) {
+ if layout.is_unsized() {
+ // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
+ // efficiently check whether they are sized. We have to catch that case here.
+ throw_inval!(ConstPropNonsense);
+ }
+ }
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
}
@@ -600,16 +635,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
match place.as_mplace_or_local() {
Left(mplace) => Ok(mplace.into()),
Right((frame, local, offset)) => {
+ debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
let base = self.local_to_op(&self.stack()[frame], local, None)?;
- let mut field = if let Some(offset) = offset {
- // This got offset. We can be sure that the field is sized.
- base.offset(offset, place.layout, self)?
- } else {
- assert_eq!(place.layout, base.layout);
- // Unsized cases are possible here since an unsized local will be a
- // `Place::Local` until the first projection calls `place_to_op` to extract the
- // underlying mplace.
- base
+ let mut field = match offset {
+ Some(offset) => base.offset(offset, place.layout, self)?,
+ None => {
+ // In the common case this hasn't been projected.
+ debug_assert_eq!(place.layout, base.layout);
+ base
+ }
};
field.align = Some(place.align);
Ok(field)
@@ -634,7 +668,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
op = self.project(&op, elem)?
}
- trace!("eval_place_to_op: got {:?}", *op);
+ trace!("eval_place_to_op: got {:?}", op);
// Sanity-check the type we ended up with.
debug_assert!(
mir_assign_valid_types(
@@ -645,7 +679,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)?)?,
op.layout,
),
- "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+ "eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
op.layout.ty,
);
@@ -668,7 +702,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Constant(constant) => {
let c =
- self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
+ self.subst_from_current_frame_and_normalize_erasing_regions(constant.const_)?;
// This can still fail:
// * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
@@ -677,61 +711,13 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.eval_mir_constant(&c, Some(constant.span), layout)?
}
};
- trace!("{:?}: {:?}", mir_op, *op);
+ trace!("{:?}: {:?}", mir_op, op);
Ok(op)
}
- fn eval_ty_constant(
- &self,
- val: ty::Const<'tcx>,
- span: Option<Span>,
- ) -> InterpResult<'tcx, ValTree<'tcx>> {
- Ok(match val.kind() {
- ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
- throw_inval!(TooGeneric)
- }
- // FIXME(generic_const_exprs): `ConstKind::Expr` should be able to be evaluated
- ty::ConstKind::Expr(_) => throw_inval!(TooGeneric),
- ty::ConstKind::Error(reported) => {
- throw_inval!(AlreadyReported(reported.into()))
- }
- ty::ConstKind::Unevaluated(uv) => {
- let instance = self.resolve(uv.def, uv.args)?;
- let cid = GlobalId { instance, promoted: None };
- self.ctfe_query(span, |tcx| tcx.eval_to_valtree(self.param_env.and(cid)))?
- .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"))
- }
- ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
- span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {val:?}")
- }
- ty::ConstKind::Value(valtree) => valtree,
- })
- }
-
- pub fn eval_mir_constant(
- &self,
- val: &mir::ConstantKind<'tcx>,
- span: Option<Span>,
- layout: Option<TyAndLayout<'tcx>>,
- ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
- match *val {
- mir::ConstantKind::Ty(ct) => {
- let ty = ct.ty();
- let valtree = self.eval_ty_constant(ct, span)?;
- let const_val = self.tcx.valtree_to_const_val((ty, valtree));
- self.const_val_to_op(const_val, ty, layout)
- }
- mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
- mir::ConstantKind::Unevaluated(uv, _) => {
- let instance = self.resolve(uv.def, uv.args)?;
- Ok(self.eval_global(GlobalId { instance, promoted: uv.promoted }, span)?.into())
- }
- }
- }
-
pub(crate) fn const_val_to_op(
&self,
- val_val: ConstValue<'tcx>,
+ val_val: mir::ConstValue<'tcx>,
ty: Ty<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
@@ -744,25 +730,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let op = match val_val {
- ConstValue::ByRef { alloc, offset } => {
- let id = self.tcx.create_memory_alloc(alloc);
+ mir::ConstValue::Indirect { alloc_id, offset } => {
// We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen.
- let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+ let ptr = self.global_base_pointer(Pointer::new(alloc_id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr.into()))
}
- ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
- ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
- ConstValue::Slice { data, start, end } => {
+ mir::ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
+ mir::ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+ mir::ConstValue::Slice { data, meta } => {
// We rely on mutability being set correctly in `data` to prevent writes
// where none should happen.
- let ptr = Pointer::new(
- self.tcx.create_memory_alloc(data),
- Size::from_bytes(start), // offset: `start`
- );
+ let ptr = Pointer::new(self.tcx.reserve_and_set_memory_alloc(data), Size::ZERO);
Operand::Immediate(Immediate::new_slice(
- Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
- u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+ self.global_base_pointer(ptr)?.into(),
+ meta,
self,
))
}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index eb0645780..b084864f3 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,7 +1,7 @@
use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
-use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@@ -20,9 +20,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+ let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
- Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
+ Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {op:?}",
);
@@ -30,7 +30,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do
// `force_allocation`).
- let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
+ let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
self.write_immediate(pair, dest)?;
} else {
assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@@ -38,7 +38,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
let val_field = self.project_field(dest, 0)?;
- self.write_scalar(val, &val_field)?;
+ self.write_scalar(val.to_scalar(), &val_field)?;
let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
@@ -54,9 +54,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
- let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
- assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
- self.write_scalar(val, dest)
+ let val = self.wrapping_binary_op(op, left, right)?;
+ assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
+ self.write_immediate(*val, dest)
}
}
@@ -66,7 +66,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: char,
r: char,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ge => l >= r,
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
};
- (Scalar::from_bool(res), false, self.tcx.types.bool)
+ (ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_bool_op(
@@ -86,7 +86,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: bool,
r: bool,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -101,33 +101,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
BitXor => l ^ r,
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
};
- (Scalar::from_bool(res), false, self.tcx.types.bool)
+ (ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
- ty: Ty<'tcx>,
+ layout: TyAndLayout<'tcx>,
l: F,
r: F,
- ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ ) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
- let (val, ty) = match bin_op {
- Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
- Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
- Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
- Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
- Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
- Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
- Add => ((l + r).value.into(), ty),
- Sub => ((l - r).value.into(), ty),
- Mul => ((l * r).value.into(), ty),
- Div => ((l / r).value.into(), ty),
- Rem => ((l % r).value.into(), ty),
+ let val = match bin_op {
+ Eq => ImmTy::from_bool(l == r, *self.tcx),
+ Ne => ImmTy::from_bool(l != r, *self.tcx),
+ Lt => ImmTy::from_bool(l < r, *self.tcx),
+ Le => ImmTy::from_bool(l <= r, *self.tcx),
+ Gt => ImmTy::from_bool(l > r, *self.tcx),
+ Ge => ImmTy::from_bool(l >= r, *self.tcx),
+ Add => ImmTy::from_scalar((l + r).value.into(), layout),
+ Sub => ImmTy::from_scalar((l - r).value.into(), layout),
+ Mul => ImmTy::from_scalar((l * r).value.into(), layout),
+ Div => ImmTy::from_scalar((l / r).value.into(), layout),
+ Rem => ImmTy::from_scalar((l % r).value.into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
- (val, false, ty)
+ (val, false)
}
fn binary_int_op(
@@ -138,7 +138,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
left_layout: TyAndLayout<'tcx>,
r: u128,
right_layout: TyAndLayout<'tcx>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
let throw_ub_on_overflow = match bin_op {
@@ -200,19 +200,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
);
}
- return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
// For the remaining ops, the types must be the same on both sides
if left_layout.ty != right_layout.ty {
span_bug!(
self.cur_span(),
- "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
- bin_op,
- l,
- left_layout.ty,
- r,
- right_layout.ty,
+ "invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
+ l_ty = left_layout.ty,
+ r_ty = right_layout.ty,
)
}
@@ -230,7 +227,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Some(op) = op {
let l = self.sign_extend(l, left_layout) as i128;
let r = self.sign_extend(r, right_layout) as i128;
- return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+ return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r == 0 => throw_ub!(DivisionByZero),
@@ -267,22 +264,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
- return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
}
- let (val, ty) = match bin_op {
- Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
- Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+ let val = match bin_op {
+ Eq => ImmTy::from_bool(l == r, *self.tcx),
+ Ne => ImmTy::from_bool(l != r, *self.tcx),
- Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
- Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
- Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
- Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+ Lt => ImmTy::from_bool(l < r, *self.tcx),
+ Le => ImmTy::from_bool(l <= r, *self.tcx),
+ Gt => ImmTy::from_bool(l > r, *self.tcx),
+ Ge => ImmTy::from_bool(l >= r, *self.tcx),
- BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
- BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
- BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+ BitOr => ImmTy::from_uint(l | r, left_layout),
+ BitAnd => ImmTy::from_uint(l & r, left_layout),
+ BitXor => ImmTy::from_uint(l ^ r, left_layout),
Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed());
@@ -304,12 +301,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
- return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
+ return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
_ => span_bug!(
self.cur_span(),
- "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+ "invalid binary op {:?}: {:?}, {:?} (both {})",
bin_op,
l,
r,
@@ -317,7 +314,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
),
};
- Ok((val, false, ty))
+ Ok((val, false))
}
fn binary_ptr_op(
@@ -325,7 +322,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
match bin_op {
@@ -336,7 +333,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
- Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
+ Ok((
+ ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
+ false,
+ ))
}
// Fall back to machine hook so Miri can support more pointer ops.
@@ -344,16 +344,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- /// Returns the result of the specified operation, whether it overflowed, and
- /// the result type.
+ /// Returns the result of the specified operation, and whether it overflowed.
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
trace!(
- "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ "Running binary op {:?}: {:?} ({}), {:?} ({})",
bin_op,
*left,
left.layout.ty,
@@ -376,15 +375,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
- let ty = left.layout.ty;
+ let layout = left.layout;
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F32 => {
- self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+ self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
- self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+ self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
})
}
@@ -392,7 +391,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the RHS type can be different, e.g. for shifts -- but it has to be integral, too
assert!(
right.layout.ty.is_integral(),
- "Unexpected types for BinOp: {:?} {:?} {:?}",
+ "Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -407,7 +406,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (Even when both sides are pointers, their type might differ, see issue #91636)
assert!(
right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
- "Unexpected types for BinOp: {:?} {:?} {:?}",
+ "Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -417,22 +416,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
self.cur_span(),
- "Invalid MIR: bad LHS type for binop: {:?}",
+ "Invalid MIR: bad LHS type for binop: {}",
left.layout.ty
),
}
}
- /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
#[inline]
- pub fn binary_op(
+ pub fn wrapping_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
- Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
+ Ok(val)
}
/// Returns the result of the specified operation, whether it overflowed, and
@@ -441,12 +439,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
let val = val.to_scalar();
- trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+ trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);
match layout.ty.kind() {
ty::Bool => {
@@ -455,7 +453,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
};
- Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
+ Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
let res = match (un_op, fty) {
@@ -463,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
};
- Ok((res, false, layout.ty))
+ Ok((ImmTy::from_scalar(res, layout), false))
}
_ => {
assert!(layout.ty.is_integral());
@@ -482,17 +480,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(truncated, overflow || self.sign_extend(truncated, layout) != res)
}
};
- Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+ Ok((ImmTy::from_uint(res, layout), overflow))
}
}
}
- pub fn unary_op(
+ #[inline]
+ pub fn wrapping_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
- Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
+ Ok(val)
}
}
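The operator helpers above now return `(ImmTy, bool)` instead of `(Scalar, bool, Ty)`, so the result carries its layout with it and callers such as `binop_with_overflow` or `wrapping_binary_op` no longer have to call `layout_of` on a separately returned type. A hedged sketch of why bundling the value with its layout simplifies call sites; the types here are simplified stand-ins, not rustc's:

    // Stand-ins for the interpreter's Scalar / TyAndLayout / ImmTy.
    #[derive(Clone, Copy, Debug)]
    struct Layout { name: &'static str }

    #[derive(Clone, Copy, Debug)]
    struct ImmTy { val: u128, layout: Layout }

    // Old shape: value, overflow flag, and the *type* of the result, which every
    // caller then had to turn back into a layout before writing the value.
    fn old_overflowing_add(l: u128, r: u128) -> (u128, bool, &'static str) {
        let (v, o) = l.overflowing_add(r);
        (v, o, "u128")
    }

    // New shape: the value already carries its layout.
    fn new_overflowing_add(l: ImmTy, r: ImmTy) -> (ImmTy, bool) {
        let (v, o) = l.val.overflowing_add(r.val);
        (ImmTy { val: v, layout: l.layout }, o)
    }

    fn main() {
        let layout = Layout { name: "u128" };
        let (val, overflowed) = new_overflowing_add(
            ImmTy { val: u128::MAX, layout },
            ImmTy { val: 1, layout },
        );
        // The caller reads both the scalar and its type from one value.
        println!("{} (overflowed: {overflowed}, type: {})", val.val, val.layout.name);
        let _ = old_overflowing_add(1, 2); // old shape, shown for comparison
    }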
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index daadb7589..503004cbb 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -9,16 +9,15 @@ use either::{Either, Left, Right};
use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
+use rustc_target::abi::{Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};
use super::{
- alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
- ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
- Pointer, Projectable, Provenance, Readable, Scalar,
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, ImmTy,
+ Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand, Pointer,
+ PointerArithmetic, Projectable, Provenance, Readable, Scalar,
};
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
@@ -41,37 +40,17 @@ impl<Prov: Provenance> MemPlaceMeta<Prov> {
}
}
+ #[inline(always)]
pub fn has_meta(self) -> bool {
match self {
Self::Meta(_) => true,
Self::None => false,
}
}
-
- pub(crate) fn len<'tcx>(
- &self,
- layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
- ) -> InterpResult<'tcx, u64> {
- if layout.is_unsized() {
- // We need to consult `meta` metadata
- match layout.ty.kind() {
- ty::Slice(..) | ty::Str => self.unwrap_meta().to_target_usize(cx),
- _ => bug!("len not supported on unsized type {:?}", layout.ty),
- }
- } else {
- // Go through the layout. There are lots of types that support a length,
- // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
- match layout.fields {
- abi::FieldsShape::Array { count, .. } => Ok(count),
- _ => bug!("len not supported on sized type {:?}", layout.ty),
- }
- }
- }
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
-pub struct MemPlace<Prov: Provenance = AllocId> {
+pub(super) struct MemPlace<Prov: Provenance = AllocId> {
/// The pointer can be a pure integer, with the `None` provenance.
pub ptr: Pointer<Option<Prov>>,
/// Metadata for unsized places. Interpretation is up to the type.
@@ -80,66 +59,6 @@ pub struct MemPlace<Prov: Provenance = AllocId> {
pub meta: MemPlaceMeta<Prov>,
}
-/// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Clone, Hash, Eq, PartialEq, Debug)]
-pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
- mplace: MemPlace<Prov>,
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `MPlaceTy`!
- pub align: Align,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
- type Target = MemPlace<Prov>;
- #[inline(always)]
- fn deref(&self) -> &MemPlace<Prov> {
- &self.mplace
- }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Place<Prov: Provenance = AllocId> {
- /// A place referring to a value allocated in the `Memory` system.
- Ptr(MemPlace<Prov>),
-
- /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
- /// where in the local this place is located; if it is `None`, no projection has been applied.
- /// Such projections are meaningful even if the offset is 0, since they can change layouts.
- /// (Without that optimization, we'd just always be a `MemPlace`.)
- /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
- /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
- Local { frame: usize, local: mir::Local, offset: Option<Size> },
-}
-
-#[derive(Clone, Debug)]
-pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
- place: Place<Prov>, // Keep this private; it helps enforce invariants.
- pub layout: TyAndLayout<'tcx>,
- /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
- /// it needs to have a different alignment than the field type would usually have.
- /// So we represent this here with a separate field that "overwrites" `layout.align`.
- /// This means `layout.align` should never be used for a `PlaceTy`!
- pub align: Align,
-}
-
-impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
- type Target = Place<Prov>;
- #[inline(always)]
- fn deref(&self) -> &Place<Prov> {
- &self.place
- }
-}
-
-impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
- #[inline(always)]
- fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
- PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
- }
-}
-
impl<Prov: Provenance> MemPlace<Prov> {
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
@@ -157,7 +76,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
- #[inline(always)]
+ #[inline]
pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
match self.meta {
MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
@@ -183,6 +102,28 @@ impl<Prov: Provenance> MemPlace<Prov> {
}
}
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Clone, Hash, Eq, PartialEq)]
+pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
+ mplace: MemPlace<Prov>,
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `MPlaceTy`!
+ pub align: Align,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for MPlaceTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("MPlaceTy")
+ .field("mplace", &self.mplace)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
/// Produces a MemPlace that works for ZST but nothing else.
/// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
@@ -212,30 +153,48 @@ impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
align: layout.align.abi,
}
}
+
+ /// Adjust the provenance of the main pointer (metadata is unaffected).
+ pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
+ MPlaceTy { mplace: self.mplace.map_provenance(f), ..self }
+ }
+
+ #[inline(always)]
+ pub(super) fn mplace(&self) -> &MemPlace<Prov> {
+ &self.mplace
+ }
+
+ #[inline(always)]
+ pub fn ptr(&self) -> Pointer<Option<Prov>> {
+ self.mplace.ptr
+ }
+
+ #[inline(always)]
+ pub fn to_ref(&self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+ self.mplace.to_ref(cx)
+ }
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- _ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- Ok(self.meta)
+ #[inline(always)]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ self.mplace.meta
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(MPlaceTy {
- mplace: self.mplace.offset_with_meta_(offset, meta, cx)?,
+ mplace: self.mplace.offset_with_meta_(offset, meta, ecx)?,
align: self.align.restrict_for_offset(offset),
layout,
})
@@ -249,31 +208,109 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for MPlaceTy<'tcx
}
}
-impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+#[derive(Copy, Clone, Debug)]
+pub(super) enum Place<Prov: Provenance = AllocId> {
+ /// A place referring to a value allocated in the `Memory` system.
+ Ptr(MemPlace<Prov>),
+
+ /// To support alloc-free locals, we are able to write directly to a local. The offset indicates
+ /// where in the local this place is located; if it is `None`, no projection has been applied.
+ /// Such projections are meaningful even if the offset is 0, since they can change layouts.
+ /// (Without that optimization, we'd just always be a `MemPlace`.)
+ /// Note that this only stores the frame index, not the thread this frame belongs to -- that is
+ /// implicit. This means a `Place` must never be moved across interpreter thread boundaries!
+ ///
+ /// This variant shall not be used for unsized types -- those must always live in memory.
+ Local { frame: usize, local: mir::Local, offset: Option<Size> },
+}
+
+#[derive(Clone)]
+pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
+ place: Place<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `PlaceTy`!
+ pub align: Align,
+}
+
+impl<Prov: Provenance> std::fmt::Debug for PlaceTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Printing `layout` results in too much noise; just print a nice version of the type.
+ f.debug_struct("PlaceTy")
+ .field("place", &self.place)
+ .field("ty", &format_args!("{}", self.layout.ty))
+ .finish()
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout, align: mplace.align }
+ }
+}
+
+impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ pub(super) fn place(&self) -> &Place<Prov> {
+ &self.place
+ }
+
+ /// A place is either an mplace or some local.
+ #[inline(always)]
+ pub fn as_mplace_or_local(
+ &self,
+ ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
+ match self.place {
+ Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Local { frame, local, offset } => Right((frame, local, offset)),
+ }
+ }
+
+ #[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+ self.as_mplace_or_local().left().unwrap_or_else(|| {
+ bug!(
+ "PlaceTy of type {} was a local when it was expected to be an MPlace",
+ self.layout.ty
+ )
+ })
+ }
+}
+
+impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn layout(&self) -> TyAndLayout<'tcx> {
self.layout
}
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- ecx.place_meta(self)
+ #[inline]
+ fn meta(&self) -> MemPlaceMeta<Prov> {
+ match self.as_mplace_or_local() {
+ Left(mplace) => mplace.meta(),
+ Right(_) => {
+ debug_assert!(self.layout.is_sized(), "unsized locals should live in memory");
+ MemPlaceMeta::None
+ }
+ }
}
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
Ok(match self.as_mplace_or_local() {
- Left(mplace) => mplace.offset_with_meta(offset, meta, layout, cx)?.into(),
+ Left(mplace) => mplace.offset_with_meta(offset, meta, layout, ecx)?.into(),
Right((frame, local, old_offset)) => {
+ debug_assert!(layout.is_sized(), "unsized locals should live in memory");
assert_matches!(meta, MemPlaceMeta::None); // we couldn't store it anyway...
- let new_offset = cx
+ let new_offset = ecx
.data_layout()
.offset(old_offset.unwrap_or(Size::ZERO).bytes(), offset.bytes())?;
PlaceTy {
@@ -301,11 +338,11 @@ impl<'tcx, Prov: Provenance + 'static> Projectable<'tcx, Prov> for PlaceTy<'tcx,
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
#[inline(always)]
pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
- match **self {
+ match self.op() {
Operand::Indirect(mplace) => {
- Left(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+ Left(MPlaceTy { mplace: *mplace, layout: self.layout, align: self.align.unwrap() })
}
- Operand::Immediate(imm) => Right(ImmTy::from_immediate(imm, self.layout)),
+ Operand::Immediate(imm) => Right(ImmTy::from_immediate(*imm, self.layout)),
}
}
@@ -321,30 +358,7 @@ impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
}
}
-impl<'tcx, Prov: Provenance + 'static> PlaceTy<'tcx, Prov> {
- /// A place is either an mplace or some local.
- #[inline]
- pub fn as_mplace_or_local(
- &self,
- ) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local, Option<Size>)> {
- match **self {
- Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
- Place::Local { frame, local, offset } => Right((frame, local, offset)),
- }
- }
-
- #[inline(always)]
- #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
- pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
- self.as_mplace_or_local().left().unwrap_or_else(|| {
- bug!(
- "PlaceTy of type {} was a local when it was expected to be an MPlace",
- self.layout.ty
- )
- })
- }
-}
-
+/// The `Writeable` trait describes interpreter values that can be written to.
pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
fn as_mplace_or_local(
&self,
@@ -356,7 +370,7 @@ pub trait Writeable<'tcx, Prov: Provenance>: Projectable<'tcx, Prov> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>>;
}
-impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
@@ -375,7 +389,7 @@ impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for PlaceTy<'tcx, P
}
}
-impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
+impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
#[inline(always)]
fn as_mplace_or_local(
&self,
@@ -396,23 +410,9 @@ impl<'tcx, Prov: Provenance + 'static> Writeable<'tcx, Prov> for MPlaceTy<'tcx,
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + 'static,
+ Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
- /// Get the metadata of the given place.
- pub(super) fn place_meta(
- &self,
- place: &PlaceTy<'tcx, M::Provenance>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>> {
- if place.layout.is_unsized() {
- // For `Place::Local`, the metadata is stored with the local, not the place. So we have
- // to look that up first.
- self.place_to_op(place)?.meta()
- } else {
- Ok(MemPlaceMeta::None)
- }
- }
-
/// Take a value, which represents a (thin or wide) reference, and make it a place.
/// Alignment is just based on the type. This is the inverse of `mplace_to_ref()`.
///
@@ -444,7 +444,7 @@ where
&self,
mplace: &MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
- let imm = mplace.to_ref(self);
+ let imm = mplace.mplace.to_ref(self);
let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
Ok(ImmTy::from_immediate(imm, layout))
}
@@ -460,7 +460,7 @@ where
trace!("deref to {} on {:?}", val.layout.ty, *val);
if val.layout.ty.is_box() {
- bug!("dereferencing {:?}", val.layout.ty);
+ bug!("dereferencing {}", val.layout.ty);
}
let mplace = self.ref_to_mplace(&val)?;
@@ -478,7 +478,7 @@ where
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
// Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc(mplace.ptr, size, mplace.align)
+ self.get_ptr_alloc(mplace.ptr(), size, mplace.align)
}
#[inline]
@@ -491,7 +491,7 @@ where
.size_and_align_of_mplace(&mplace)?
.unwrap_or((mplace.layout.size, mplace.layout.align.abi));
// Due to packed places, only `mplace.align` matters.
- self.get_ptr_alloc_mut(mplace.ptr, size, mplace.align)
+ self.get_ptr_alloc_mut(mplace.ptr(), size, mplace.align)
}
/// Check if this mplace is dereferenceable and sufficiently aligned.
@@ -502,7 +502,7 @@ where
// Due to packed places, only `mplace.align` matters.
let align =
if M::enforce_alignment(self).should_check() { mplace.align } else { Align::ONE };
- self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
+ self.check_ptr_access_align(mplace.ptr(), size, align, CheckInAllocMsg::DerefTest)?;
Ok(())
}
@@ -537,8 +537,24 @@ where
frame: usize,
local: mir::Local,
) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
- let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
- let place = Place::Local { frame, local, offset: None };
+ // Other parts of the system rely on `Place::Local` never being unsized.
+ // So we eagerly check here if this local has an MPlace, and if yes we use it.
+ let frame_ref = &self.stack()[frame];
+ let layout = self.layout_of_local(frame_ref, local, None)?;
+ let place = if layout.is_sized() {
+ // We can just always use the `Local` for sized values.
+ Place::Local { frame, local, offset: None }
+ } else {
+ // Unsized `Local` isn't okay (we cannot store the metadata).
+ match frame_ref.locals[local].access()? {
+ Operand::Immediate(_) => {
+ // ConstProp marks *all* locals as `Immediate::Uninit` since it cannot
+ // efficiently check whether they are sized. We have to catch that case here.
+ throw_inval!(ConstPropNonsense);
+ }
+ Operand::Indirect(mplace) => Place::Ptr(*mplace),
+ }
+ };
Ok(PlaceTy { place, layout, align: layout.align.abi })
}
@@ -555,7 +571,7 @@ where
place = self.project(&place, elem)?
}
- trace!("{:?}", self.dump_place(place.place));
+ trace!("{:?}", self.dump_place(&place));
// Sanity-check the type we ended up with.
debug_assert!(
mir_assign_valid_types(
@@ -566,7 +582,7 @@ where
)?)?,
place.layout,
),
- "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
+ "eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
place.layout.ty,
);
@@ -631,7 +647,8 @@ where
// just fall back to the indirect path.
dest.force_mplace(self)?
} else {
- match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ match self.stack_mut()[frame].locals[local].access_mut()? {
Operand::Immediate(local_val) => {
// Local can be updated in-place.
*local_val = src;
@@ -751,7 +768,8 @@ where
// FIXME: share the logic with `write_immediate_no_validate`.
dest.force_mplace(self)?
} else {
- match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ match self.stack_mut()[frame].locals[local].access_mut()? {
Operand::Immediate(local) => {
*local = Immediate::Uninit;
return Ok(());
@@ -782,6 +800,13 @@ where
dest: &impl Writeable<'tcx, M::Provenance>,
allow_transmute: bool,
) -> InterpResult<'tcx> {
+ // Generally for transmutation, data must be valid both at the old and new type.
+ // But if the types are the same, the 2nd validation below suffices.
+ if src.layout().ty != dest.layout().ty && M::enforce_validity(self, src.layout()) {
+ self.validate_operand(&src.to_op(self)?)?;
+ }
+
+ // Do the actual copy.
self.copy_op_no_validate(src, dest, allow_transmute)?;
if M::enforce_validity(self, dest.layout()) {
@@ -810,7 +835,7 @@ where
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
- "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+ "type mismatch when copying!\nsrc: {},\ndest: {}",
src.layout().ty,
dest.layout().ty,
);
@@ -845,7 +870,7 @@ where
*src_val,
src.layout(),
dest_mem.align,
- *dest_mem,
+ dest_mem.mplace,
)
};
}
@@ -872,7 +897,12 @@ where
// (Or as the `Assign` docs put it, assignments "not producing primitives" must be
// non-overlapping.)
self.mem_copy(
- src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ true,
+ src.ptr(),
+ src.align,
+ dest.ptr(),
+ dest.align,
+ dest_size,
+ /*nonoverlapping*/ true,
)
}
@@ -887,7 +917,8 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
let mplace = match place.place {
Place::Local { frame, local, offset } => {
- let whole_local = match M::access_local_mut(self, frame, local)? {
+ M::before_access_local_mut(self, frame, local)?;
+ let whole_local = match self.stack_mut()[frame].locals[local].access_mut()? {
&mut Operand::Immediate(local_val) => {
// We need to make an allocation.
@@ -896,10 +927,8 @@ where
// that has different alignment than the outer field.
let local_layout =
self.layout_of_local(&self.stack()[frame], local, None)?;
- if local_layout.is_unsized() {
- throw_unsup_format!("unsized locals are not supported");
- }
- let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+ assert!(local_layout.is_sized(), "unsized locals cannot be immediate");
+ let mplace = self.allocate(local_layout, MemoryKind::Stack)?;
// Preserve old value. (As an optimization, we can skip this if it was uninit.)
if !matches!(local_val, Immediate::Uninit) {
// We don't have to validate as we can assume the local was already
@@ -909,15 +938,16 @@ where
local_val,
local_layout,
local_layout.align.abi,
- mplace,
+ mplace.mplace,
)?;
}
+ M::after_local_allocated(self, frame, local, &mplace)?;
// Now we can call `access_mut` again, asserting it goes well, and actually
// overwrite things. This points to the entire allocation, not just the part
// the place refers to, i.e. we do this before we apply `offset`.
- *M::access_local_mut(self, frame, local).unwrap() =
- Operand::Indirect(mplace);
- mplace
+ *self.stack_mut()[frame].locals[local].access_mut().unwrap() =
+ Operand::Indirect(mplace.mplace);
+ mplace.mplace
}
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
};
@@ -1006,7 +1036,7 @@ where
pub fn raw_const_to_mplace(
&self,
- raw: ConstAlloc<'tcx>,
+ raw: mir::ConstAlloc<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
// This must be an allocation in `tcx`
let _ = self.tcx.global_alloc(raw.alloc_id);
@@ -1025,12 +1055,12 @@ where
matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
"`unpack_dyn_trait` only makes sense on `dyn*` types"
);
- let vtable = mplace.meta.unwrap_meta().to_pointer(self)?;
+ let vtable = mplace.meta().unwrap_meta().to_pointer(self)?;
let (ty, _) = self.get_ptr_vtable(vtable)?;
let layout = self.layout_of(ty)?;
let mplace = MPlaceTy {
- mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace },
+ mplace: MemPlace { meta: MemPlaceMeta::None, ..mplace.mplace },
layout,
align: layout.align.abi,
};
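A recurring theme in place.rs is that `Place::Local` may no longer refer to unsized values: `local_to_place` now eagerly resolves an unsized local to its backing `MemPlace` (or raises `ConstPropNonsense` if the local is still an immediate), so the later `meta()` and `offset_with_meta` implementations can simply assert sizedness. A simplified sketch of that eager-resolution idea, using hypothetical types rather than the interpreter's:

    #[derive(Clone, Copy, Debug)]
    enum Place {
        // Value lives in interpreter memory; metadata (e.g. a slice length) lives with it.
        Ptr { addr: usize, meta: Option<u64> },
        // Value lives directly in a stack frame local; only allowed for sized values.
        Local { frame: usize, local: usize },
    }

    #[derive(Clone, Copy)]
    struct Layout { sized: bool }

    // What the local currently holds inside the frame.
    #[derive(Clone, Copy)]
    enum LocalStorage { Immediate(u128), Indirect { addr: usize, meta: Option<u64> } }

    fn local_to_place(frame: usize, local: usize, layout: Layout, storage: LocalStorage)
        -> Result<Place, &'static str>
    {
        if layout.sized {
            // Sized locals can always be referred to directly.
            return Ok(Place::Local { frame, local });
        }
        // Unsized locals must already live in memory, otherwise we bail out.
        match storage {
            LocalStorage::Indirect { addr, meta } => Ok(Place::Ptr { addr, meta }),
            LocalStorage::Immediate(_) => Err("unsized local has no memory backing"),
        }
    }

    fn main() {
        let p = local_to_place(0, 3, Layout { sized: false },
                               LocalStorage::Indirect { addr: 0x1000, meta: Some(5) });
        println!("{p:?}");
    }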
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 882097ad2..70df3d8fd 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -7,12 +7,13 @@
//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
//! implement the logic on OpTy, and MPlaceTy calls that.
+use std::marker::PhantomData;
+use std::ops::Range;
+
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
-use rustc_middle::ty::TyCtxt;
-use rustc_target::abi::HasDataLayout;
use rustc_target::abi::Size;
use rustc_target::abi::{self, VariantIdx};
@@ -24,44 +25,59 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
fn layout(&self) -> TyAndLayout<'tcx>;
/// Get the metadata of a wide value.
- fn meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
- &self,
- ecx: &InterpCx<'mir, 'tcx, M>,
- ) -> InterpResult<'tcx, MemPlaceMeta<M::Provenance>>;
+ fn meta(&self) -> MemPlaceMeta<Prov>;
+ /// Get the length of a slice/string/array stored here.
fn len<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, u64> {
- self.meta(ecx)?.len(self.layout(), ecx)
+ let layout = self.layout();
+ if layout.is_unsized() {
+ // We need to consult `meta` metadata
+ match layout.ty.kind() {
+ ty::Slice(..) | ty::Str => self.meta().unwrap_meta().to_target_usize(ecx),
+ _ => bug!("len not supported on unsized type {:?}", layout.ty),
+ }
+ } else {
+ // Go through the layout. There are lots of types that support a length,
+ // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+ match layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", layout.ty),
+ }
+ }
}
/// Offset the value by the given amount, replacing the layout and metadata.
- fn offset_with_meta(
+ fn offset_with_meta<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
meta: MemPlaceMeta<Prov>,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self>;
- fn offset(
+ #[inline]
+ fn offset<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
offset: Size,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert!(layout.is_sized());
- self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, ecx)
}
- fn transmute(
+ #[inline]
+ fn transmute<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
&self,
layout: TyAndLayout<'tcx>,
- cx: &impl HasDataLayout,
+ ecx: &InterpCx<'mir, 'tcx, M>,
) -> InterpResult<'tcx, Self> {
+ assert!(self.layout().is_sized() && layout.is_sized());
assert_eq!(self.layout().size, layout.size);
- self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, cx)
+ self.offset_with_meta(Size::ZERO, MemPlaceMeta::None, layout, ecx)
}
/// Convert this to an `OpTy`. This might be an irreversible transformation, but is useful for
@@ -72,10 +88,30 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
}
+/// A type representing iteration over the elements of an array.
+pub struct ArrayIterator<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> {
+ base: &'a P,
+ range: Range<u64>,
+ stride: Size,
+ field_layout: TyAndLayout<'tcx>,
+ _phantom: PhantomData<Prov>, // otherwise it says `Prov` is never used...
+}
+
+impl<'tcx, 'a, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'tcx, 'a, Prov, P> {
+ /// Should be the same `ecx` on each call, and match the one used to create the iterator.
+ pub fn next<'mir, M: Machine<'mir, 'tcx, Provenance = Prov>>(
+ &mut self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, Option<(u64, P)>> {
+ let Some(idx) = self.range.next() else { return Ok(None) };
+ Ok(Some((idx, self.base.offset(self.stride * idx, self.field_layout, ecx)?)))
+ }
+}
+
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
- Prov: Provenance + 'static,
+ Prov: Provenance,
M: Machine<'mir, 'tcx, Provenance = Prov>,
{
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
@@ -104,7 +140,7 @@ where
// But const-prop actually feeds us such nonsense MIR! (see test `const_prop/issue-86351.rs`)
throw_inval!(ConstPropNonsense);
}
- let base_meta = base.meta(self)?;
+ let base_meta = base.meta();
// Re-use parent metadata to determine dynamic field layout.
// With custom DSTS, this *will* execute user-defined code, but the same
// happens at run-time so that's okay.
@@ -132,7 +168,7 @@ where
base: &P,
variant: VariantIdx,
) -> InterpResult<'tcx, P> {
- assert!(!base.meta(self)?.has_meta());
+ assert!(!base.meta().has_meta());
// Downcasts only change the layout.
// (In particular, no check about whether this is even the active variant -- that's by design,
// see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
@@ -206,20 +242,13 @@ where
pub fn project_array_fields<'a, P: Projectable<'tcx, M::Provenance>>(
&self,
base: &'a P,
- ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, P>> + 'a>
- where
- 'tcx: 'a,
- {
+ ) -> InterpResult<'tcx, ArrayIterator<'tcx, 'a, M::Provenance, P>> {
let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
};
let len = base.len(self)?;
let field_layout = base.layout().field(self, 0);
- let tcx: TyCtxt<'tcx> = *self.tcx;
- // `Size` multiplication
- Ok((0..len).map(move |i| {
- base.offset_with_meta(stride * i, MemPlaceMeta::None, field_layout, &tcx)
- }))
+ Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
}
/// Subslicing
@@ -287,7 +316,11 @@ where
{
use rustc_middle::mir::ProjectionElem::*;
Ok(match proj_elem {
- OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?,
+ OpaqueCast(ty) => {
+ span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
+ }
+ // We don't want anything happening here; this is just a dummy.
+ Subtype(_) => base.transmute(base.layout(), self)?,
Field(field, _) => self.project_field(base, field.index())?,
Downcast(_, variant) => self.project_downcast(base, variant)?,
Deref => self.deref_pointer(&base.to_op(self)?)?.into(),
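
The `ArrayIterator` introduced above swaps a returned `impl Iterator` (which had to capture the interpreter and carry a `'tcx: 'a` bound) for a plain struct holding only the base, index range, stride and field layout, with the interpreter passed to every `next` call instead. A minimal standalone sketch of that lending-style pattern, using a made-up `Ctx`/`FieldIter` pair rather than the real `InterpCx`/`Projectable` machinery:

use std::ops::Range;

struct Ctx { stride: usize }

struct FieldIter<'a> {
    base: &'a [u8],
    range: Range<usize>,
}

impl<'a> FieldIter<'a> {
    // As with `ArrayIterator::next`, the same `cx` should be passed on every call.
    fn next(&mut self, cx: &Ctx) -> Option<(usize, &'a [u8])> {
        let idx = self.range.next()?;
        let start = idx * cx.stride;
        Some((idx, &self.base[start..start + cx.stride]))
    }
}

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6];
    let cx = Ctx { stride: 2 };
    let mut it = FieldIter { base: &data, range: 0..3 };
    // Mirrors the `while let Some((idx, field)) = iter.next(ecx)?` loop used in visitor.rs.
    while let Some((idx, field)) = it.next(&cx) {
        println!("element {idx}: {field:?}");
    }
}
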
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 0740894a4..284e13407 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
- let val = self.unary_op(un_op, &val)?;
+ let val = self.wrapping_unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}
@@ -204,7 +204,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// avoid writing each operand individually and instead just make many copies
// of the first element.
let elem_size = first.layout.size;
- let first_ptr = first.ptr;
+ let first_ptr = first.ptr();
let rest_ptr = first_ptr.offset(elem_size, self)?;
// For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
// that place might be more aligned than its type mandates (a `u8` array could
@@ -301,11 +301,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let op = self.eval_place_to_op(place, None)?;
let variant = self.read_discriminant(&op)?;
let discr = self.discriminant_for_variant(op.layout, variant)?;
- self.write_scalar(discr, &dest)?;
+ self.write_immediate(*discr, &dest)?;
}
}
- trace!("{:?}", self.dump_place(*dest));
+ trace!("{:?}", self.dump_place(&dest));
Ok(())
}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
index 3c03172bb..578dd6622 100644
--- a/compiler/rustc_const_eval/src/interpret/terminator.rs
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -2,19 +2,25 @@ use std::borrow::Cow;
use either::Either;
use rustc_ast::ast::InlineAsmOptions;
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
-use rustc_middle::ty::Instance;
use rustc_middle::{
mir,
- ty::{self, Ty},
+ ty::{
+ self,
+ layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout},
+ AdtDef, Instance, Ty,
+ },
+};
+use rustc_span::sym;
+use rustc_target::abi::{self, FieldIdx};
+use rustc_target::abi::{
+ call::{ArgAbi, FnAbi, PassMode},
+ Integer,
};
-use rustc_target::abi;
-use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;
use super::{
- AllocId, FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy,
- Operand, PlaceTy, Provenance, Scalar, StackPopCleanup,
+ AllocId, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable,
+ Provenance, Scalar, StackPopCleanup,
};
use crate::fluent_generated as fluent;
@@ -92,14 +98,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
- let res = self
- .overflowing_binary_op(
- mir::BinOp::Eq,
- &discr,
- &ImmTy::from_uint(const_int, discr.layout),
- )?
- .0;
- if res.to_bool()? {
+ let res = self.wrapping_binary_op(
+ mir::BinOp::Eq,
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
+ )?;
+ if res.to_scalar().to_bool()? {
target_block = target;
break;
}
@@ -145,7 +149,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
terminator.source_info.span,
- "invalid callee of type {:?}",
+ "invalid callee of type {}",
func.layout.ty
),
};
@@ -196,15 +200,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
- Terminate => {
- // FIXME: maybe should call `panic_no_unwind` lang item instead.
- M::abort(self, "panic in a function that cannot unwind".to_owned())?;
+ UnwindTerminate(reason) => {
+ M::unwind_terminate(self, reason)?;
}
// When we encounter Resume, we've finished unwinding
// cleanup for the current stack frame. We pop it in order
// to continue unwinding the next frame
- Resume => {
+ UnwindResume => {
trace!("unwinding: resuming from cleanup");
// By definition, a Resume terminator means
// that we're unwinding
@@ -252,90 +255,172 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.collect()
}
- fn check_argument_compat(
- caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- ) -> bool {
- // Heuristic for type comparison.
- let layout_compat = || {
- if caller_abi.layout.ty == callee_abi.layout.ty {
- // No question
- return true;
- }
- if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
- // No, no, no. We require the types to *exactly* match for unsized arguments. If
- // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
- // then who knows what happens.
- return false;
+ /// Find the wrapped inner type of a transparent wrapper.
+ /// Must not be called on 1-ZSTs (as they don't have a uniquely defined "wrapped field").
+ ///
+ /// We work with `TyAndLayout` here since that makes it much easier to iterate over all fields.
+ fn unfold_transparent(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ may_unfold: impl Fn(AdtDef<'tcx>) -> bool,
+ ) -> TyAndLayout<'tcx> {
+ match layout.ty.kind() {
+ ty::Adt(adt_def, _) if adt_def.repr().transparent() && may_unfold(*adt_def) => {
+ assert!(!adt_def.is_enum());
+ // Find the non-1-ZST field, and recurse.
+ let (_, field) = layout.non_1zst_field(self).unwrap();
+ self.unfold_transparent(field, may_unfold)
}
- if caller_abi.layout.size != callee_abi.layout.size
- || caller_abi.layout.align.abi != callee_abi.layout.align.abi
- {
- // This cannot go well...
- return false;
+ // Not a transparent type, no further unfolding.
+ _ => layout,
+ }
+ }
+
+ /// Unwrap types that are guaranteed a null-pointer optimization.
+ fn unfold_npo(&self, layout: TyAndLayout<'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+ // Check if this is `Option` wrapping some type.
+ let inner = match layout.ty.kind() {
+ ty::Adt(def, args) if self.tcx.is_diagnostic_item(sym::Option, def.did()) => {
+ args[0].as_type().unwrap()
}
- // The rest *should* be okay, but we are extra conservative.
- match (caller_abi.layout.abi, callee_abi.layout.abi) {
- // Different valid ranges are okay (once we enforce validity,
- // that will take care to make it UB to leave the range, just
- // like for transmute).
- (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
- caller.primitive() == callee.primitive()
- }
- (
- abi::Abi::ScalarPair(caller1, caller2),
- abi::Abi::ScalarPair(callee1, callee2),
- ) => {
- caller1.primitive() == callee1.primitive()
- && caller2.primitive() == callee2.primitive()
- }
- // Be conservative
- _ => false,
+ _ => {
+ // Not an `Option`.
+ return Ok(layout);
}
};
- // When comparing the PassMode, we have to be smart about comparing the attributes.
- let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
- // There's only one regular attribute that matters for the call ABI: InReg.
- // Everything else is things like noalias, dereferenceable, nonnull, ...
- // (This also applies to pointee_size, pointee_align.)
- if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
- {
- return false;
+ let inner = self.layout_of(inner)?;
+ // Check if the inner type is one of the NPO-guaranteed ones.
+ // For that we first unpeel transparent *structs* (but not unions).
+ let is_npo = |def: AdtDef<'tcx>| {
+ self.tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
+ };
+ let inner = self.unfold_transparent(inner, /* may_unfold */ |def| {
+ // Stop at NPO types so that we don't miss that attribute in the check below!
+ def.is_struct() && !is_npo(def)
+ });
+ Ok(match inner.ty.kind() {
+ ty::Ref(..) | ty::FnPtr(..) => {
+ // Option<&T> behaves like &T, and same for fn()
+ inner
}
- // We also compare the sign extension mode -- this could let the callee make assumptions
- // about bits that conceptually were not even passed.
- if a1.arg_ext != a2.arg_ext {
- return false;
+ ty::Adt(def, _) if is_npo(*def) => {
+ // Once we found a `nonnull_optimization_guaranteed` type, further strip off
+ // newtype structs from it to find the underlying ABI type.
+ self.unfold_transparent(inner, /* may_unfold */ |def| def.is_struct())
}
- return true;
- };
- let mode_compat = || match (&caller_abi.mode, &callee_abi.mode) {
- (PassMode::Ignore, PassMode::Ignore) => true,
- (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
- (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
- arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
+ _ => {
+ // Everything else we do not unfold.
+ layout
}
- (PassMode::Cast(c1, pad1), PassMode::Cast(c2, pad2)) => c1 == c2 && pad1 == pad2,
- (
- PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
- PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
- ) => arg_attr_compat(a1, a2) && s1 == s2,
- (
- PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
- PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
- ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
- _ => false,
+ })
+ }
+
+ /// Check if these two layouts look like they are fn-ABI-compatible.
+ /// (We also compare the `PassMode`, so this doesn't have to check everything. But it turns out
+ /// that only checking the `PassMode` is insufficient.)
+ fn layout_compat(
+ &self,
+ caller: TyAndLayout<'tcx>,
+ callee: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, bool> {
+ // Fast path: equal types are definitely compatible.
+ if caller.ty == callee.ty {
+ return Ok(true);
+ }
+ // 1-ZST are compatible with all 1-ZST (and with nothing else).
+ if caller.is_1zst() || callee.is_1zst() {
+ return Ok(caller.is_1zst() && callee.is_1zst());
+ }
+ // Unfold newtypes and NPO optimizations.
+ let unfold = |layout: TyAndLayout<'tcx>| {
+ self.unfold_npo(self.unfold_transparent(layout, /* may_unfold */ |_def| true))
};
+ let caller = unfold(caller)?;
+ let callee = unfold(callee)?;
+ // Now see if these inner types are compatible.
+
+ // Compatible pointer types. For thin pointers, we have to accept even non-`repr(transparent)`
+ // things as compatible due to `DispatchFromDyn`. For instance, `Rc<i32>` and `*mut i32`
+ // must be compatible. So we just accept everything with Pointer ABI as compatible,
+ // even if this will accept some code that is not stably guaranteed to work.
+ // This also handles function pointers.
+ let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
+ abi::Abi::Scalar(s) => match s.primitive() {
+ abi::Primitive::Pointer(addr_space) => Some(addr_space),
+ _ => None,
+ },
+ _ => None,
+ };
+ if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
+ return Ok(caller == callee);
+ }
+ // For wide pointers we have to get the pointee type.
+ let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
+ // We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
+ Ok(Some(match ty.kind() {
+ ty::Ref(_, ty, _) => *ty,
+ ty::RawPtr(mt) => mt.ty,
+ // We should only accept `Box` with the default allocator.
+ // It's hard to test for that though so we accept every 1-ZST allocator.
+ ty::Adt(def, args)
+ if def.is_box()
+ && self.layout_of(args[1].expect_ty()).is_ok_and(|l| l.is_1zst()) =>
+ {
+ args[0].expect_ty()
+ }
+ _ => return Ok(None),
+ }))
+ };
+ if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
+ // This is okay if they have the same metadata type.
+ let meta_ty = |ty: Ty<'tcx>| {
+ let (meta, only_if_sized) = ty.ptr_metadata_ty(*self.tcx, |ty| ty);
+ assert!(
+ !only_if_sized,
+ "there should be no more 'maybe has that metadata' types during interpretation"
+ );
+ meta
+ };
+ return Ok(meta_ty(caller) == meta_ty(callee));
+ }
+
+ // Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
+ let int_ty = |ty: Ty<'tcx>| {
+ Some(match ty.kind() {
+ ty::Int(ity) => (Integer::from_int_ty(&self.tcx, *ity), /* signed */ true),
+ ty::Uint(uty) => (Integer::from_uint_ty(&self.tcx, *uty), /* signed */ false),
+ _ => return None,
+ })
+ };
+ if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
+ // This is okay if they are the same integer type.
+ return Ok(caller == callee);
+ }
+
+ // Fall back to exact equality.
+ // FIXME: We are missing the rules for "repr(C) wrapping compatible types".
+ Ok(caller == callee)
+ }
- if layout_compat() && mode_compat() {
- return true;
+ fn check_argument_compat(
+ &self,
+ caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) -> InterpResult<'tcx, bool> {
+ // We do not want to accept things as ABI-compatible that just "happen to be" compatible on the current target,
+ // so we implement a type-based check that reflects the guaranteed rules for ABI compatibility.
+ if self.layout_compat(caller_abi.layout, callee_abi.layout)? {
+ // Ensure that our checks imply actual ABI compatibility for this concrete call.
+ assert!(caller_abi.eq_abi(&callee_abi));
+ return Ok(true);
+ } else {
+ trace!(
+ "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+ caller_abi,
+ callee_abi
+ );
+ return Ok(false);
}
- trace!(
- "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
- caller_abi,
- callee_abi
- );
- return false;
}
/// Initialize a single callee argument, checking the types for compatibility.
@@ -345,63 +430,58 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Item = (&'x FnArg<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
>,
callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
- callee_arg: &PlaceTy<'tcx, M::Provenance>,
+ callee_arg: &mir::Place<'tcx>,
+ callee_ty: Ty<'tcx>,
+ already_live: bool,
) -> InterpResult<'tcx>
where
'tcx: 'x,
'tcx: 'y,
{
+ assert_eq!(callee_ty, callee_abi.layout.ty);
if matches!(callee_abi.mode, PassMode::Ignore) {
- // This one is skipped.
+ // This one is skipped. Still must be made live though!
+ if !already_live {
+ self.storage_live(callee_arg.as_local().unwrap())?;
+ }
return Ok(());
}
// Find next caller arg.
let Some((caller_arg, caller_abi)) = caller_args.next() else {
throw_ub_custom!(fluent::const_eval_not_enough_caller_args);
};
- // Now, check
- if !Self::check_argument_compat(caller_abi, callee_abi) {
- let callee_ty = format!("{}", callee_arg.layout.ty);
- let caller_ty = format!("{}", caller_arg.layout().ty);
- throw_ub_custom!(
- fluent::const_eval_incompatible_types,
- callee_ty = callee_ty,
- caller_ty = caller_ty,
- )
+ assert_eq!(caller_arg.layout().layout, caller_abi.layout.layout);
+ // Sadly we cannot assert that `caller_arg.layout().ty` and `caller_abi.layout.ty` are
+ // equal; in closures the types sometimes differ. We just hope that `caller_abi` is the
+ // right type to print to the user.
+
+ // Check compatibility
+ if !self.check_argument_compat(caller_abi, callee_abi)? {
+ throw_ub!(AbiMismatchArgument {
+ caller_ty: caller_abi.layout.ty,
+ callee_ty: callee_abi.layout.ty
+ });
}
// We work with a copy of the argument for now; if this is in-place argument passing, we
// will later protect the source it comes from. This means the callee cannot observe if we
// did in-place or by-copy argument passing, except for pointer equality tests.
let caller_arg_copy = self.copy_fn_arg(&caller_arg)?;
- // Special handling for unsized parameters.
- if caller_arg_copy.layout.is_unsized() {
- // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
- assert_eq!(caller_arg_copy.layout.ty, callee_arg.layout.ty);
- // We have to properly pre-allocate the memory for the callee.
- // So let's tear down some abstractions.
- // This all has to be in memory, there are no immediate unsized values.
- let src = caller_arg_copy.assert_mem_place();
- // The destination cannot be one of these "spread args".
- let (dest_frame, dest_local, dest_offset) = callee_arg
- .as_mplace_or_local()
- .right()
- .expect("callee fn arguments must be locals");
- // We are just initializing things, so there can't be anything here yet.
- assert!(matches!(
- *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
- Operand::Immediate(Immediate::Uninit)
- ));
- assert_eq!(dest_offset, None);
- // Allocate enough memory to hold `src`.
- let dest_place = self.allocate_dyn(src.layout, MemoryKind::Stack, src.meta)?;
- // Update the local to be that new place.
- *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
+ if !already_live {
+ let local = callee_arg.as_local().unwrap();
+ let meta = caller_arg_copy.meta();
+ // `check_argument_compat` ensures that if metadata is needed, both have the same type,
+ // so we know they will use the metadata the same way.
+ assert!(!meta.has_meta() || caller_arg_copy.layout.ty == callee_ty);
+
+ self.storage_live_dyn(local, meta)?;
}
+ // Now we can finally actually evaluate the callee place.
+ let callee_arg = self.eval_place(*callee_arg)?;
// We allow some transmutes here.
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
// is true for all `copy_op`, but there are a lot of special cases for argument passing
// specifically.)
- self.copy_op(&caller_arg_copy, callee_arg, /*allow_transmute*/ true)?;
+ self.copy_op(&caller_arg_copy, &callee_arg, /*allow_transmute*/ true)?;
// If this was an in-place pass, protect the place it comes from for the duration of the call.
if let FnArg::InPlace(place) = caller_arg {
M::protect_in_place_function_argument(self, place)?;
@@ -584,21 +664,50 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// taking into account the `spread_arg`. If we could write
// this as a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
- // not advance `caller_iter` for ZSTs.
+ // not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter();
for local in body.args_iter() {
- let dest = self.eval_place(mir::Place::from(local))?;
+ // Construct the destination place for this argument. At this point all
+ // locals are still dead, so we cannot construct a `PlaceTy`.
+ let dest = mir::Place::from(local);
+ // `layout_of_local` does more than just the substitution we need to get the
+ // type, but the result gets cached so this avoids calling the substitution
+ // query *again* the next time this local is accessed.
+ let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == body.spread_arg {
+ // Make the local live once, then fill in the value field by field.
+ self.storage_live(local)?;
// Must be a tuple
- for i in 0..dest.layout.fields.count() {
- let dest = self.project_field(&dest, i)?;
+ let ty::Tuple(fields) = ty.kind() else {
+ span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
+ };
+ for (i, field_ty) in fields.iter().enumerate() {
+ let dest = dest.project_deeper(
+ &[mir::ProjectionElem::Field(
+ FieldIdx::from_usize(i),
+ field_ty,
+ )],
+ *self.tcx,
+ );
let callee_abi = callee_args_abis.next().unwrap();
- self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ self.pass_argument(
+ &mut caller_args,
+ callee_abi,
+ &dest,
+ field_ty,
+ /* already_live */ true,
+ )?;
}
} else {
- // Normal argument
+ // Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap();
- self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ self.pass_argument(
+ &mut caller_args,
+ callee_abi,
+ &dest,
+ ty,
+ /* already_live */ false,
+ )?;
}
}
// If the callee needs a caller location, pretend we consume one more argument from the ABI.
@@ -614,14 +723,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_too_many_caller_args);
}
// Don't forget to check the return type!
- if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
- let callee_ty = format!("{}", callee_fn_abi.ret.layout.ty);
- let caller_ty = format!("{}", caller_fn_abi.ret.layout.ty);
- throw_ub_custom!(
- fluent::const_eval_incompatible_return_types,
- callee_ty = callee_ty,
- caller_ty = caller_ty,
- )
+ if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
+ throw_ub!(AbiMismatchReturn {
+ caller_ty: caller_fn_abi.ret.layout.ty,
+ callee_ty: callee_fn_abi.ret.layout.ty
+ });
}
// Ensure the return place is aligned and dereferenceable, and protect it for
// in-place return value passing.
@@ -631,6 +737,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Nothing to do for locals, they are always properly allocated and aligned.
}
M::protect_in_place_function_argument(self, destination)?;
+
+ // Don't forget to mark "initially live" locals as live.
+ self.storage_live_for_always_live_locals()?;
};
match res {
Err(err) => {
@@ -640,7 +749,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(()) => Ok(()),
}
}
- // cannot use the shim here, because that will only result in infinite recursion
+ // `InstanceDef::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
+ // codegen'd / interpreted as virtual calls through the vtable.
ty::InstanceDef::Virtual(def_id, idx) => {
let mut args = args.to_vec();
// We have to implement all "object safe receivers". So we have to go search for a
@@ -671,26 +781,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => {
// Not there yet, search for the only non-ZST field.
- let mut non_zst_field = None;
- for i in 0..receiver.layout.fields.count() {
- let field = self.project_field(&receiver, i)?;
- let zst =
- field.layout.is_zst() && field.layout.align.abi.bytes() == 1;
- if !zst {
- assert!(
- non_zst_field.is_none(),
- "multiple non-ZST fields in dyn receiver type {}",
- receiver.layout.ty
- );
- non_zst_field = Some(field);
- }
- }
- receiver = non_zst_field.unwrap_or_else(|| {
- panic!(
- "no non-ZST fields in dyn receiver type {}",
- receiver.layout.ty
- )
- });
+ // (The rules for `DispatchFromDyn` ensure there's exactly one such field.)
+ let (idx, _) = receiver.layout.non_1zst_field(self).expect(
+ "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
+ );
+ receiver = self.project_field(&receiver, idx)?;
}
}
};
@@ -705,7 +800,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
throw_ub_custom!(fluent::const_eval_dyn_star_call_vtable_mismatch);
}
- (vptr, dyn_ty, recv.ptr)
+ (vptr, dyn_ty, recv.ptr())
} else {
// Doesn't have to be a `dyn Trait`, but the unsized tail must be `dyn Trait`.
// (For that reason we also cannot use `unpack_dyn_trait`.)
@@ -722,7 +817,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
assert!(receiver_place.layout.is_unsized());
// Get the required information from the vtable.
- let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+ let vptr = receiver_place.meta().unwrap_meta().to_pointer(self)?;
let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
if dyn_trait != data.principal() {
throw_ub_custom!(fluent::const_eval_dyn_call_vtable_mismatch);
@@ -731,7 +826,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// It might be surprising that we use a pointer as the receiver even if this
// is a by-val case; this works because by-val passing of an unsized `dyn
// Trait` to a function is actually desugared to a pointer.
- (vptr, dyn_ty, receiver_place.ptr)
+ (vptr, dyn_ty, receiver_place.ptr())
};
// Now determine the actual method to call. We can do that in two different ways and
@@ -764,18 +859,26 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Adjust receiver argument. Layout can be any (thin) ptr.
+ let receiver_ty = Ty::new_mut_ptr(self.tcx.tcx, dyn_ty);
args[0] = FnArg::Copy(
ImmTy::from_immediate(
Scalar::from_maybe_pointer(adjusted_receiver, self).into(),
- self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, dyn_ty))?,
+ self.layout_of(receiver_ty)?,
)
.into(),
);
trace!("Patched receiver operand to {:#?}", args[0]);
+ // Need to also adjust the type in the ABI. Strangely, the layout there is actually
+ // already fine! Just the type is bogus. This is due to what `force_thin_self_ptr`
+ // does in `fn_abi_new_uncached`; supposedly, codegen relies on having the bogus
+ // type, so we just patch this up locally.
+ let mut caller_fn_abi = caller_fn_abi.clone();
+ caller_fn_abi.args[0].layout.ty = receiver_ty;
+
// recurse with concrete function
self.eval_fn_call(
FnVal::Instance(fn_inst),
- (caller_abi, caller_fn_abi),
+ (caller_abi, &caller_fn_abi),
&args,
with_caller_location,
destination,
@@ -818,7 +921,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
target: mir::BasicBlock,
unwind: mir::UnwindAction,
) -> InterpResult<'tcx> {
- trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
+ trace!("drop_in_place: {:?},\n instance={:?}", place, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
// implementation fail -- a problem shared by rustc.
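
The `unfold_npo`/`layout_compat` helpers added above rely on the guaranteed null-pointer optimization: `Option` wrapped around a non-nullable pointer type has the same size (and call ABI) as the pointer itself, which is why such pairs are accepted as argument-compatible. A small self-contained check of that guarantee, using only the standard library:

use std::mem::size_of;

fn main() {
    // These layouts are guaranteed by the language, not merely observed.
    assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    assert_eq!(size_of::<Option<Box<u32>>>(), size_of::<Box<u32>>());
    assert_eq!(size_of::<Option<fn()>>(), size_of::<fn()>());
    println!("niche-optimized Option layouts match their pointer payloads");
}
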
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
index fa15d466a..a9ca268a2 100644
--- a/compiler/rustc_const_eval/src/interpret/traits.rs
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -27,7 +27,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ensure_monomorphic_enough(*self.tcx, ty)?;
ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
- let vtable_symbolic_allocation = self.tcx.create_vtable_alloc(ty, poly_trait_ref);
+ let vtable_symbolic_allocation = self.tcx.reserve_and_set_vtable_alloc(ty, poly_trait_ref);
let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_symbolic_allocation))?;
Ok(vtable_ptr.into())
}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index b33194423..eb639ded7 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -4,7 +4,7 @@ use rustc_middle::ty::{
};
use std::ops::ControlFlow;
-/// Checks whether a type contains generic parameters which require substitution.
+/// Checks whether a type contains generic parameters which must be instantiated.
///
/// In case it does, returns a `TooGeneric` const eval error. Note that due to polymorphization
/// types may be "concrete enough" even though they still contain generic parameters in
@@ -43,7 +43,8 @@ where
.try_into()
.expect("more generic parameters than can fit into a `u32`");
// Only recurse when generic parameters in fns, closures and generators
- // are used and require substitution.
+ // are used and have to be instantiated.
+ //
// Just in case there are closures or generators within this subst,
// recurse.
if unused_params.is_used(index) && subst.has_param() {
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index d3f05af1c..3e023a896 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -360,7 +360,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Handle wide pointers.
// Check metadata early, for better diagnostics
if place.layout.is_unsized() {
- self.check_wide_ptr_meta(place.meta, place.layout)?;
+ self.check_wide_ptr_meta(place.meta(), place.layout)?;
}
// Make sure this is dereferenceable and all.
let size_and_align = try_validation!(
@@ -379,7 +379,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
self.ecx.check_ptr_access_align(
- place.ptr,
+ place.ptr(),
size,
align,
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
@@ -414,7 +414,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if let Some(ref_tracking) = self.ref_tracking.as_deref_mut() {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
- if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
+ if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr()) {
// Let's see what kind of memory this points to.
let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
match alloc_kind {
@@ -521,7 +521,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let place =
self.ecx.ref_to_mplace(&self.read_immediate(value, ExpectedKind::RawPtr)?)?;
if place.layout.is_unsized() {
- self.check_wide_ptr_meta(place.meta, place.layout)?;
+ self.check_wide_ptr_meta(place.meta(), place.layout)?;
}
Ok(true)
}
@@ -583,7 +583,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
| ty::Bound(..)
| ty::Param(..)
| ty::Alias(..)
- | ty::GeneratorWitnessMIR(..)
| ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
}
}
@@ -739,7 +738,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
- self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len)),
+ self.ecx.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len)),
self.path,
Ub(InvalidUninitBytes(..)) => Uninit { expected: ExpectedKind::Str },
Unsup(ReadPointerAsInt(_)) => PointerAsInt { expected: ExpectedKind::Str }
@@ -789,7 +788,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept uninit, for consistency with the slow path.
- let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size, mplace.align)?.expect("we already excluded size 0");
match alloc.get_bytes_strip_provenance() {
// In the happy case, we needn't check anything else.
@@ -911,9 +910,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Complain about any other kind of error -- those are bad because we'd like to
// report them in a way that shows *where* in the value the issue lies.
Err(err) => {
- let (err, backtrace) = err.into_parts();
- backtrace.print_backtrace();
- bug!("Unexpected Undefined Behavior error during validation: {err:?}");
+ bug!(
+ "Unexpected Undefined Behavior error during validation: {}",
+ self.format_error(err)
+ );
}
}
}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
index 531e2bd3e..fc21ad1f1 100644
--- a/compiler/rustc_const_eval/src/interpret/visitor.rs
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -170,8 +170,9 @@ pub trait ValueVisitor<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
}
}
FieldsShape::Array { .. } => {
- for (idx, field) in self.ecx().project_array_fields(v)?.enumerate() {
- self.visit_field(v, idx, &field?)?;
+ let mut iter = self.ecx().project_array_fields(v)?;
+ while let Some((idx, field)) = iter.next(self.ecx())? {
+ self.visit_field(v, idx.try_into().unwrap(), &field)?;
}
}
}
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
index c126f749b..8bb409cea 100644
--- a/compiler/rustc_const_eval/src/lib.rs
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -38,8 +38,7 @@ pub use errors::ReportErrorExt;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
-use rustc_middle::query::Providers;
-use rustc_middle::ty;
+use rustc_middle::{ty, util::Providers};
fluent_messages! { "../messages.ftl" }
@@ -52,8 +51,8 @@ pub fn provide(providers: &mut Providers) {
let (param_env, raw) = param_env_and_value.into_parts();
const_eval::eval_to_valtree(tcx, param_env, raw)
};
- providers.try_destructure_mir_constant_for_diagnostics =
- |tcx, (cv, ty)| const_eval::try_destructure_mir_constant_for_diagnostics(tcx, cv, ty);
+ providers.hooks.try_destructure_mir_constant_for_diagnostics =
+ const_eval::try_destructure_mir_constant_for_diagnostics;
providers.valtree_to_const_val = |tcx, (ty, valtree)| {
const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
};
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
index fae047bff..8c2346c4e 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/check.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -167,7 +167,7 @@ impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
false
}
- hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+ hir::ConstContext::Const { .. } | hir::ConstContext::Static(_) => {
let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
.into_engine(ccx.tcx, &ccx.body)
.iterate_to_fixpoint()
@@ -415,8 +415,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
BorrowKind::Shared => {
PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
}
- BorrowKind::Shallow => {
- PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ BorrowKind::Fake => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::FakeBorrow)
}
BorrowKind::Mut { .. } => {
PlaceContext::MutatingUse(MutatingUseContext::Borrow)
@@ -491,7 +491,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
}
- Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, place)
+ Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Fake, place)
| Rvalue::AddressOf(Mutability::Not, place) => {
let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
&self.ccx,
@@ -664,6 +664,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| ProjectionElem::Downcast(..)
| ProjectionElem::OpaqueCast(..)
| ProjectionElem::Subslice { .. }
+ | ProjectionElem::Subtype(..)
| ProjectionElem::Field(..)
| ProjectionElem::Index(_) => {}
}
@@ -1037,7 +1038,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
self.check_op(ops::Generator(hir::GeneratorKind::Gen))
}
- TerminatorKind::Terminate => {
+ TerminatorKind::UnwindTerminate(_) => {
// Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
span_bug!(self.span, "`Terminate` terminator outside of cleanup block")
}
@@ -1046,7 +1047,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Unreachable => {}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
index e3377bd10..fd6bc2ee9 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -106,7 +106,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
}
}
- mir::TerminatorKind::Terminate
+ mir::TerminatorKind::UnwindTerminate(_)
| mir::TerminatorKind::Call { .. }
| mir::TerminatorKind::Assert { .. }
| mir::TerminatorKind::FalseEdge { .. }
@@ -114,7 +114,7 @@ impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
| mir::TerminatorKind::GeneratorDrop
| mir::TerminatorKind::Goto { .. }
| mir::TerminatorKind::InlineAsm { .. }
- | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::UnwindResume
| mir::TerminatorKind::Return
| mir::TerminatorKind::SwitchInt { .. }
| mir::TerminatorKind::Unreachable
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
index b1b2859ef..de3186a53 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -306,6 +306,7 @@ where
ProjectionElem::Index(index) if in_local(index) => return true,
ProjectionElem::Deref
+ | ProjectionElem::Subtype(_)
| ProjectionElem::Field(_, _)
| ProjectionElem::OpaqueCast(_)
| ProjectionElem::ConstantIndex { .. }
@@ -346,8 +347,8 @@ where
};
// Check the qualifs of the value of `const` items.
- let uneval = match constant.literal {
- ConstantKind::Ty(ct)
+ let uneval = match constant.const_ {
+ Const::Ty(ct)
if matches!(
ct.kind(),
ty::ConstKind::Param(_) | ty::ConstKind::Error(_) | ty::ConstKind::Value(_)
@@ -355,11 +356,11 @@ where
{
None
}
- ConstantKind::Ty(c) => {
+ Const::Ty(c) => {
bug!("expected ConstKind::Param or ConstKind::Value here, found {:?}", c)
}
- ConstantKind::Unevaluated(uv, _) => Some(uv),
- ConstantKind::Val(..) => None,
+ Const::Unevaluated(uv, _) => Some(uv),
+ Const::Val(..) => None,
};
if let Some(mir::UnevaluatedConst { def, args: _, promoted }) = uneval {
@@ -383,5 +384,5 @@ where
}
// Otherwise use the qualifs of the type.
- Q::in_any_value_of_ty(cx, constant.literal.ty())
+ Q::in_any_value_of_ty(cx, constant.const_.ty())
}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
index a137f84b7..a23922c77 100644
--- a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -105,7 +105,7 @@ where
fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
match kind {
mir::BorrowKind::Mut { .. } => true,
- mir::BorrowKind::Shared | mir::BorrowKind::Shallow => {
+ mir::BorrowKind::Shared | mir::BorrowKind::Fake => {
self.shared_borrow_allows_mutation(place)
}
}
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
index d79c65f1d..5d8b1956a 100644
--- a/compiler/rustc_const_eval/src/transform/promote_consts.rs
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -357,7 +357,9 @@ impl<'tcx> Validator<'_, 'tcx> {
return Err(Unpromotable);
}
- ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subtype(_)
+ | ProjectionElem::Subslice { .. } => {}
ProjectionElem::Index(local) => {
let mut promotable = false;
@@ -372,7 +374,7 @@ impl<'tcx> Validator<'_, 'tcx> {
StatementKind::Assign(box (
_,
Rvalue::Use(Operand::Constant(c)),
- )) => c.literal.try_eval_target_usize(self.tcx, self.param_env),
+ )) => c.const_.try_eval_target_usize(self.tcx, self.param_env),
_ => None,
}
} else {
@@ -454,7 +456,7 @@ impl<'tcx> Validator<'_, 'tcx> {
match kind {
// Reject these borrow types just to be safe.
// FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
- BorrowKind::Shallow | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
+ BorrowKind::Fake | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => {
return Err(Unpromotable);
}
@@ -554,7 +556,7 @@ impl<'tcx> Validator<'_, 'tcx> {
// Integer division: the RHS must be a non-zero const.
let const_val = match rhs {
Operand::Constant(c) => {
- c.literal.try_eval_bits(self.tcx, self.param_env, lhs_ty)
+ c.const_.try_eval_bits(self.tcx, self.param_env)
}
_ => None,
};
@@ -644,7 +646,7 @@ impl<'tcx> Validator<'_, 'tcx> {
// Everywhere else, we require `#[rustc_promotable]` on the callee.
let promote_all_const_fn = matches!(
self.const_kind,
- Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+ Some(hir::ConstContext::Static(_) | hir::ConstContext::Const { inline: false })
);
if !promote_all_const_fn {
if let ty::FnDef(def_id, _) = *fn_ty.kind() {
@@ -766,10 +768,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
if self.keep_original {
rhs.clone()
} else {
- let unit = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ let unit = Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: statement.source_info.span,
user_ty: None,
- literal: ConstantKind::zero_sized(self.tcx.types.unit),
+ const_: Const::zero_sized(self.tcx.types.unit),
})));
mem::replace(rhs, unit)
},
@@ -844,10 +846,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> {
let args = tcx.erase_regions(GenericArgs::identity_for_item(tcx, def));
let uneval = mir::UnevaluatedConst { def, args, promoted: Some(promoted_id) };
- Operand::Constant(Box::new(Constant {
+ Operand::Constant(Box::new(ConstOperand {
span,
user_ty: None,
- literal: ConstantKind::Unevaluated(uneval, ty),
+ const_: Const::Unevaluated(uneval, ty),
}))
};
@@ -1041,8 +1043,8 @@ pub fn is_const_fn_in_array_repeat_expression<'tcx>(
if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
&block.terminator
{
- if let Operand::Constant(box Constant { literal, .. }) = func {
- if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+ if let Operand::Constant(box ConstOperand { const_, .. }) = func {
+ if let ty::FnDef(def_id, _) = *const_.ty().kind() {
if destination == place {
if ccx.tcx.is_const_fn(def_id) {
return true;
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index 783b52d00..ec1bc20ed 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -6,20 +6,18 @@ use rustc_index::IndexVec;
use rustc_infer::traits::Reveal;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
-use rustc_middle::mir::{
- traversal, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping, Local, Location,
- MirPass, MirPhase, NonDivergingIntrinsic, NullOp, Operand, Place, PlaceElem, PlaceRef,
- ProjectionElem, RetagKind, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind,
- Terminator, TerminatorKind, UnOp, UnwindAction, VarDebugInfo, VarDebugInfoContents,
- START_BLOCK,
-};
-use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance};
use rustc_mir_dataflow::impls::MaybeStorageLive;
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_target::abi::{Size, FIRST_VARIANT};
use rustc_target::spec::abi::Abi;
+use crate::util::is_within_packed;
+
+use crate::util::relate_types;
+
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
Unwind,
@@ -93,6 +91,7 @@ impl<'tcx> MirPass<'tcx> for Validator {
cfg_checker.visit_body(body);
cfg_checker.check_cleanup_control_flow();
+ // Also run the TypeChecker.
for (location, msg) in validate_types(tcx, self.mir_phase, param_env, body) {
cfg_checker.fail(location, msg);
}
@@ -274,7 +273,16 @@ impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
self.fail(location, "`UnwindAction::Continue` in no-unwind function");
}
}
- UnwindAction::Unreachable | UnwindAction::Terminate => (),
+ UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => {
+ if !is_cleanup {
+ self.fail(
+ location,
+ "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block",
+ );
+ }
+ }
+ // These are allowed everywhere.
+ UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
}
}
}
@@ -418,14 +426,34 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
self.check_unwind_edge(location, *unwind);
// The call destination place and Operand::Move place used as an argument might be
- // passed by a reference to the callee. Consequently they must be non-overlapping.
- // Currently this simply checks for duplicate places.
+ // passed by a reference to the callee. Consequently they must be non-overlapping
+ // and cannot be packed. Currently this simply checks for duplicate places.
self.place_cache.clear();
self.place_cache.insert(destination.as_ref());
+ if is_within_packed(self.tcx, &self.body.local_decls, *destination).is_some() {
+ // This is bad! The callee will expect the memory to be aligned.
+ self.fail(
+ location,
+ format!(
+ "encountered packed place in `Call` terminator destination: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
let mut has_duplicates = false;
for arg in args {
if let Operand::Move(place) = arg {
has_duplicates |= !self.place_cache.insert(place.as_ref());
+ if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
+ // This is bad! The callee will expect the memory to be aligned.
+ self.fail(
+ location,
+ format!(
+ "encountered `Move` of a packed place in `Call` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
}
}
@@ -433,7 +461,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
self.fail(
location,
format!(
- "encountered overlapping memory in `Call` terminator: {:?}",
+ "encountered overlapping memory in `Move` arguments to `Call` terminator: {:?}",
terminator.kind,
),
);
@@ -492,19 +520,19 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
);
}
}
- TerminatorKind::Resume => {
+ TerminatorKind::UnwindResume => {
let bb = location.block;
if !self.body.basic_blocks[bb].is_cleanup {
- self.fail(location, "Cannot `Resume` from non-cleanup basic block")
+ self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block")
}
if !self.can_unwind {
- self.fail(location, "Cannot `Resume` in a function that cannot unwind")
+ self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind")
}
}
- TerminatorKind::Terminate => {
+ TerminatorKind::UnwindTerminate(_) => {
let bb = location.block;
if !self.body.basic_blocks[bb].is_cleanup {
- self.fail(location, "Cannot `Terminate` from non-cleanup basic block")
+ self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block")
}
}
TerminatorKind::Return => {
@@ -532,6 +560,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
}
}
+/// A faster version of the validation pass that only checks those things which may break when
+/// instantiating any generic parameters.
pub fn validate_types<'tcx>(
tcx: TyCtxt<'tcx>,
mir_phase: MirPhase,
@@ -574,7 +604,15 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
return true;
}
- crate::util::is_subtype(self.tcx, self.param_env, src, dest)
+ // After borrowck subtyping should be fully explicit via
+ // `Subtype` projections.
+ let variance = if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
+ Variance::Invariant
+ } else {
+ Variance::Covariant
+ };
+
+ crate::util::relate_types(self.tcx, self.param_env, variance, src, dest)
}
}
@@ -605,6 +643,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
location: Location,
) {
match elem {
+ ProjectionElem::OpaqueCast(ty)
+ if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) =>
+ {
+ self.fail(
+ location,
+ format!("explicit opaque type cast to `{ty}` after `RevealAll`"),
+ )
+ }
ProjectionElem::Index(index) => {
let index_ty = self.body.local_decls[index].ty;
if index_ty != self.tcx.types.usize {
@@ -717,43 +763,60 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
}
}
+ ProjectionElem::Subtype(ty) => {
+ if !relate_types(
+ self.tcx,
+ self.param_env,
+ Variance::Covariant,
+ ty,
+ place_ref.ty(&self.body.local_decls, self.tcx).ty,
+ ) {
+ self.fail(
+ location,
+ format!(
+ "Failed subtyping {ty:#?} and {:#?}",
+ place_ref.ty(&self.body.local_decls, self.tcx).ty
+ ),
+ )
+ }
+ }
_ => {}
}
self.super_projection_elem(place_ref, elem, context, location);
}
fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
- let check_place = |this: &mut Self, place: Place<'_>| {
- if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
- this.fail(
+ if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite {
+ if ty.is_union() || ty.is_enum() {
+ self.fail(
START_BLOCK.start_location(),
- format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
+ format!("invalid type {ty:?} in debuginfo for {:?}", debuginfo.name),
);
}
- };
+ if projection.is_empty() {
+ self.fail(
+ START_BLOCK.start_location(),
+ format!("invalid empty projection in debuginfo for {:?}", debuginfo.name),
+ );
+ }
+ if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
+ self.fail(
+ START_BLOCK.start_location(),
+ format!(
+ "illegal projection {:?} in debuginfo for {:?}",
+ projection, debuginfo.name
+ ),
+ );
+ }
+ }
match debuginfo.value {
VarDebugInfoContents::Const(_) => {}
VarDebugInfoContents::Place(place) => {
- check_place(self, place);
- }
- VarDebugInfoContents::Composite { ty, ref fragments } => {
- for f in fragments {
- check_place(self, f.contents);
- if ty.is_union() || ty.is_enum() {
- self.fail(
- START_BLOCK.start_location(),
- format!("invalid type {ty:?} for composite debuginfo"),
- );
- }
- if f.projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
- self.fail(
- START_BLOCK.start_location(),
- format!(
- "illegal projection {:?} in debuginfo for {:?}",
- f.projection, debuginfo.name
- ),
- );
- }
+ if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
+ self.fail(
+ START_BLOCK.start_location(),
+ format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
+ );
}
}
}
@@ -785,11 +848,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
}
match rvalue {
Rvalue::Use(_) | Rvalue::CopyForDeref(_) | Rvalue::Aggregate(..) => {}
- Rvalue::Ref(_, BorrowKind::Shallow, _) => {
+ Rvalue::Ref(_, BorrowKind::Fake, _) => {
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
self.fail(
location,
- "`Assign` statement with a `Shallow` borrow should have been removed in runtime MIR",
+ "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR",
);
}
}
@@ -1052,6 +1115,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// LHS and RHS of the assignment must have the same type.
let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+
if !self.mir_assign_valid_types(right_ty, left_ty) {
self.fail(
location,
@@ -1232,8 +1296,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::InlineAsm { .. }
| TerminatorKind::GeneratorDrop
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable => {}
}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
index 4f39dad20..2e0643afb 100644
--- a/compiler/rustc_const_eval/src/util/alignment.rs
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -34,13 +34,14 @@ where
false
}
_ => {
+ // We cannot figure out the layout. Conservatively assume that this is disaligned.
debug!("is_disaligned({:?}) - true", place);
true
}
}
}
-fn is_within_packed<'tcx, L>(
+pub fn is_within_packed<'tcx, L>(
tcx: TyCtxt<'tcx>,
local_decls: &L,
place: Place<'tcx>,
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 2d1970791..e9e0690f0 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -54,7 +54,7 @@ fn might_permit_raw_init_strict<'tcx>(
if kind == ValidityRequirement::Zero {
cx.write_bytes_ptr(
- allocated.ptr,
+ allocated.ptr(),
std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
)
.expect("failed to write bytes for zero valid check");
diff --git a/compiler/rustc_const_eval/src/util/compare_types.rs b/compiler/rustc_const_eval/src/util/compare_types.rs
index 83376c8e9..265ca0c78 100644
--- a/compiler/rustc_const_eval/src/util/compare_types.rs
+++ b/compiler/rustc_const_eval/src/util/compare_types.rs
@@ -5,7 +5,7 @@
use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::traits::{DefiningAnchor, ObligationCause};
-use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt, Variance};
use rustc_trait_selection::traits::ObligationCtxt;
/// Returns whether the two types are equal up to subtyping.
@@ -24,16 +24,22 @@ pub fn is_equal_up_to_subtyping<'tcx>(
}
// Check for subtyping in either direction.
- is_subtype(tcx, param_env, src, dest) || is_subtype(tcx, param_env, dest, src)
+ relate_types(tcx, param_env, Variance::Covariant, src, dest)
+ || relate_types(tcx, param_env, Variance::Covariant, dest, src)
}
/// Returns whether `src` is a subtype of `dest`, i.e. `src <: dest`.
///
+/// When validating assignments, the variance should be `Covariant`. When checking
+/// during `MirPhase` >= `MirPhase::Runtime(RuntimePhase::Initial)` variance should be `Invariant`
+/// because we want to check for type equality.
+///
/// This mostly ignores opaque types as it can be used in constraining contexts
/// while still computing the final underlying type.
-pub fn is_subtype<'tcx>(
+pub fn relate_types<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
+ variance: Variance,
src: Ty<'tcx>,
dest: Ty<'tcx>,
) -> bool {
@@ -48,7 +54,7 @@ pub fn is_subtype<'tcx>(
let cause = ObligationCause::dummy();
let src = ocx.normalize(&cause, param_env, src);
let dest = ocx.normalize(&cause, param_env, dest);
- match ocx.sub(&cause, param_env, src, dest) {
+ match ocx.relate(&cause, param_env, variance, src, dest) {
Ok(()) => {}
Err(_) => return false,
};
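
`relate_types` now takes a `Variance` so the same helper can accept covariant assignments before borrowck and demand exact (invariant) equality in runtime MIR. As a surface-level reminder of what that distinction means, here is a made-up example that uses only the standard library, not the compiler's internal relation machinery:

use std::cell::Cell;

fn covariant(s: &str) -> usize { s.len() }

fn invariant<'a>(slot: &Cell<&'a str>, replacement: &'a str) {
    slot.set(replacement);
}

fn main() {
    let long: &'static str = "hello";
    // Covariance: a `&'static str` is usable wherever a shorter-lived `&str` is expected.
    println!("{}", covariant(long));

    // Invariance: the lifetime inside `Cell` must match exactly; a replacement
    // with a strictly shorter lifetime would be rejected here.
    let slot = Cell::new(long);
    invariant(&slot, "world");
    println!("{}", slot.get());
}
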
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
index 289e34225..040b3071e 100644
--- a/compiler/rustc_const_eval/src/util/mod.rs
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -5,9 +5,9 @@ mod check_validity_requirement;
mod compare_types;
mod type_name;
-pub use self::alignment::is_disaligned;
+pub use self::alignment::{is_disaligned, is_within_packed};
pub use self::check_validity_requirement::check_validity_requirement;
-pub use self::compare_types::{is_equal_up_to_subtyping, is_subtype};
+pub use self::compare_types::{is_equal_up_to_subtyping, relate_types};
pub use self::type_name::type_name;
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
diff --git a/compiler/rustc_const_eval/src/util/type_name.rs b/compiler/rustc_const_eval/src/util/type_name.rs
index 14a840ad1..a924afda6 100644
--- a/compiler/rustc_const_eval/src/util/type_name.rs
+++ b/compiler/rustc_const_eval/src/util/type_name.rs
@@ -64,8 +64,7 @@ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
ty::Alias(ty::Weak, _) => bug!("type_name: unexpected weak projection"),
ty::Alias(ty::Inherent, _) => bug!("type_name: unexpected inherent projection"),
- ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
- ty::GeneratorWitnessMIR(..) => bug!("type_name: unexpected `GeneratorWitnessMIR`"),
+ ty::GeneratorWitness(..) => bug!("type_name: unexpected `GeneratorWitness`"),
}
}
diff --git a/compiler/rustc_data_structures/src/flock/unix.rs b/compiler/rustc_data_structures/src/flock/unix.rs
index 4e5297d58..eff9e8f83 100644
--- a/compiler/rustc_data_structures/src/flock/unix.rs
+++ b/compiler/rustc_data_structures/src/flock/unix.rs
@@ -21,8 +21,16 @@ impl Lock {
let lock_type = if exclusive { libc::F_WRLCK } else { libc::F_RDLCK };
let mut flock: libc::flock = unsafe { mem::zeroed() };
- flock.l_type = lock_type as libc::c_short;
- flock.l_whence = libc::SEEK_SET as libc::c_short;
+ #[cfg(not(all(target_os = "hurd", target_arch = "x86")))]
+ {
+ flock.l_type = lock_type as libc::c_short;
+ flock.l_whence = libc::SEEK_SET as libc::c_short;
+ }
+ #[cfg(all(target_os = "hurd", target_arch = "x86"))]
+ {
+ flock.l_type = lock_type as libc::c_int;
+ flock.l_whence = libc::SEEK_SET as libc::c_int;
+ }
flock.l_start = 0;
flock.l_len = 0;
@@ -39,8 +47,16 @@ impl Lock {
impl Drop for Lock {
fn drop(&mut self) {
let mut flock: libc::flock = unsafe { mem::zeroed() };
- flock.l_type = libc::F_UNLCK as libc::c_short;
- flock.l_whence = libc::SEEK_SET as libc::c_short;
+ #[cfg(not(all(target_os = "hurd", target_arch = "x86")))]
+ {
+ flock.l_type = libc::F_UNLCK as libc::c_short;
+ flock.l_whence = libc::SEEK_SET as libc::c_short;
+ }
+ #[cfg(all(target_os = "hurd", target_arch = "x86"))]
+ {
+ flock.l_type = libc::F_UNLCK as libc::c_int;
+ flock.l_whence = libc::SEEK_SET as libc::c_int;
+ }
flock.l_start = 0;
flock.l_len = 0;
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 85ef2de9b..4075481e5 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -51,7 +51,7 @@ pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
// Traverse the graph, collecting a number of things:
//
// * Preorder mapping (to it, and back to the actual ordering)
- // * Postorder mapping (used exclusively for rank_partial_cmp on the final product)
+ // * Postorder mapping (used exclusively for `cmp_in_dominator_order` on the final product)
// * Parents for each vertex in the preorder tree
//
// These are all done here rather than through one of the 'standard'
@@ -342,8 +342,8 @@ impl<Node: Idx> Dominators<Node> {
/// relationship, the dominator will always precede the dominated. (The relative ordering
/// of two unrelated nodes will also be consistent, but otherwise the order has no
/// meaning.) This method cannot be used to determine if either Node dominates the other.
- pub fn rank_partial_cmp(&self, lhs: Node, rhs: Node) -> Option<Ordering> {
- self.post_order_rank[rhs].partial_cmp(&self.post_order_rank[lhs])
+ pub fn cmp_in_dominator_order(&self, lhs: Node, rhs: Node) -> Ordering {
+ self.post_order_rank[rhs].cmp(&self.post_order_rank[lhs])
}
/// Returns true if `a` dominates `b`.
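A sketch of the migration this rename implies for callers; `sort_blocks` is illustrative only, and the `rustc_index::Idx` path is an assumption based on the `Dominators` API shown above.

    // Before: nodes.sort_by(|&a, &b| dominators.rank_partial_cmp(a, b).unwrap());
    // After: the comparison is total, so no unwrap is needed.
    fn sort_blocks<Node: rustc_index::Idx>(dominators: &Dominators<Node>, nodes: &mut [Node]) {
        nodes.sort_by(|&a, &b| dominators.cmp_in_dominator_order(a, b));
    }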
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
index 9ff401c3c..3910c6fa4 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -20,7 +20,6 @@
//! the field `next_edge`). Each of those fields is an array that should
//! be indexed by the direction (see the type `Direction`).
-use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
use rustc_index::bit_set::BitSet;
use std::fmt::Debug;
@@ -28,8 +27,8 @@ use std::fmt::Debug;
mod tests;
pub struct Graph<N, E> {
- nodes: SnapshotVec<Node<N>>,
- edges: SnapshotVec<Edge<E>>,
+ nodes: Vec<Node<N>>,
+ edges: Vec<Edge<E>>,
}
pub struct Node<N> {
@@ -45,20 +44,6 @@ pub struct Edge<E> {
pub data: E,
}
-impl<N> SnapshotVecDelegate for Node<N> {
- type Value = Node<N>;
- type Undo = ();
-
- fn reverse(_: &mut Vec<Node<N>>, _: ()) {}
-}
-
-impl<N> SnapshotVecDelegate for Edge<N> {
- type Value = Edge<N>;
- type Undo = ();
-
- fn reverse(_: &mut Vec<Edge<N>>, _: ()) {}
-}
-
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct NodeIndex(pub usize);
@@ -86,11 +71,11 @@ impl NodeIndex {
impl<N: Debug, E: Debug> Graph<N, E> {
pub fn new() -> Graph<N, E> {
- Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new() }
+ Graph { nodes: Vec::new(), edges: Vec::new() }
}
pub fn with_capacity(nodes: usize, edges: usize) -> Graph<N, E> {
- Graph { nodes: SnapshotVec::with_capacity(nodes), edges: SnapshotVec::with_capacity(edges) }
+ Graph { nodes: Vec::with_capacity(nodes), edges: Vec::with_capacity(edges) }
}
// # Simple accessors
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 337720897..461ec3a90 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -37,7 +37,7 @@
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#![deny(unsafe_op_in_unsafe_fn)]
#[macro_use]
@@ -47,11 +47,14 @@ extern crate cfg_if;
#[macro_use]
extern crate rustc_macros;
+use std::fmt;
+
pub use rustc_index::static_assert_size;
+/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
-pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+pub fn outline<F: FnOnce() -> R, R>(f: F) -> R {
f()
}
@@ -126,6 +129,23 @@ impl<F: FnOnce()> Drop for OnDrop<F> {
}
}
+/// Turns a closure that takes an `&mut Formatter` into something that can be display-formatted.
+pub fn make_display(f: impl Fn(&mut fmt::Formatter<'_>) -> fmt::Result) -> impl fmt::Display {
+ struct Printer<F> {
+ f: F,
+ }
+ impl<F> fmt::Display for Printer<F>
+ where
+ F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result,
+ {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self.f)(fmt)
+ }
+ }
+
+ Printer { f }
+}
+
// See comments in src/librustc_middle/lib.rs
#[doc(hidden)]
pub fn __noop_fix_for_27438() {}
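A small usage sketch for the two helpers touched here (`outline`, formerly `cold_path`, and the new `make_display`); the closure bodies are invented for illustration.

    use std::fmt;

    // Display an ad-hoc value without defining a dedicated struct.
    fn describe(count: usize) -> impl fmt::Display {
        make_display(move |f: &mut fmt::Formatter<'_>| write!(f, "{count} item(s)"))
    }

    // `outline` is a drop-in rename of `cold_path`: keep rarely-taken work out of the caller.
    fn report_rare_case() {
        outline(|| eprintln!("this path is expected to be cold"));
    }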
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index f8c06f9a8..b067f9d45 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -92,7 +92,6 @@ cfg_if!(
[std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
[Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
[Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
- [crate::sync::Lock<T> where T: DynSend]
[crate::sync::RwLock<T> where T: DynSend]
[crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
[rustc_arena::TypedArena<T> where T: DynSend]
@@ -171,7 +170,6 @@ cfg_if!(
[std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
[Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
[Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
- [crate::sync::Lock<T> where T: DynSend]
[crate::sync::RwLock<T> where T: DynSend + DynSync]
[crate::sync::OneThread<T> where T]
[crate::sync::WorkerLocal<T> where T: DynSend]
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
index ca908671a..30403a614 100644
--- a/compiler/rustc_data_structures/src/memmap.rs
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -11,9 +11,14 @@ pub struct Mmap(Vec<u8>);
#[cfg(not(target_arch = "wasm32"))]
impl Mmap {
+ /// # Safety
+ ///
+ /// The given file must not be mutated (i.e., not written, not truncated, ...) until the mapping is closed.
+ ///
+/// However, in practice most callers do not ensure this, so uses of this function are likely unsound.
#[inline]
pub unsafe fn map(file: File) -> io::Result<Self> {
- // Safety: this is in fact not safe.
+ // Safety: the caller must ensure that this is safe.
unsafe { memmap2::Mmap::map(&file).map(Mmap) }
}
}
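A hedged sketch of what the strengthened safety contract asks of callers; the wrapper function and path handling are hypothetical.

    fn map_read_only(path: &std::path::Path) -> std::io::Result<Mmap> {
        let file = std::fs::File::open(path)?;
        // SAFETY: per the documentation above, the caller must guarantee the file
        // is neither written nor truncated while the mapping is alive.
        unsafe { Mmap::map(file) }
    }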
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 3c76c2b79..e688feb5f 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -81,8 +81,8 @@
//!
//! [mm]: https://github.com/rust-lang/measureme/
-use crate::cold_path;
use crate::fx::FxHashMap;
+use crate::outline;
use std::borrow::Borrow;
use std::collections::hash_map::Entry;
@@ -697,7 +697,7 @@ impl<'a> TimingGuard<'a> {
#[inline]
pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
if let Some(guard) = self.0 {
- cold_path(|| {
+ outline(|| {
let event_id = StringId::new_virtual(query_invocation_id.0);
let event_id = EventId::from_virtual(event_id);
guard.finish_with_override_event_id(event_id);
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 40cbf1495..29516fffd 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,31 +1,29 @@
use crate::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)]
-use crate::sync::is_dyn_thread_safe;
-use crate::sync::{CacheAligned, Lock, LockGuard};
+use crate::sync::{is_dyn_thread_safe, CacheAligned};
+use crate::sync::{Lock, LockGuard, Mode};
+#[cfg(parallel_compiler)]
+use itertools::Either;
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher};
+use std::iter;
use std::mem;
-#[cfg(parallel_compiler)]
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards.
const SHARD_BITS: usize = 5;
-#[cfg(not(parallel_compiler))]
-const SHARD_BITS: usize = 0;
-
-pub const SHARDS: usize = 1 << SHARD_BITS;
+#[cfg(parallel_compiler)]
+const SHARDS: usize = 1 << SHARD_BITS;
/// An array of cache-line aligned inner locked structures with convenience methods.
-pub struct Sharded<T> {
- /// This mask is used to ensure that accesses are inbounds of `shards`.
- /// When dynamic thread safety is off, this field is set to 0 causing only
- /// a single shard to be used for greater cache efficiency.
+/// A single lock is used when the compiler runs on only one thread.
+pub enum Sharded<T> {
+ Single(Lock<T>),
#[cfg(parallel_compiler)]
- mask: usize,
- shards: [CacheAligned<Lock<T>>; SHARDS],
+ Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>),
}
impl<T: Default> Default for Sharded<T> {
@@ -38,62 +36,133 @@ impl<T: Default> Default for Sharded<T> {
impl<T> Sharded<T> {
#[inline]
pub fn new(mut value: impl FnMut() -> T) -> Self {
- Sharded {
- #[cfg(parallel_compiler)]
- mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
- shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+ #[cfg(parallel_compiler)]
+ if is_dyn_thread_safe() {
+ return Sharded::Shards(Box::new(
+ [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+ ));
}
+
+ Sharded::Single(Lock::new(value()))
}
- #[inline(always)]
- fn mask(&self) -> usize {
- #[cfg(parallel_compiler)]
- {
- if SHARDS == 1 { 0 } else { self.mask }
- }
- #[cfg(not(parallel_compiler))]
- {
- 0
+ /// The shard is selected by hashing `val` with `FxHasher`.
+ #[inline]
+ pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+ match self {
+ Self::Single(single) => &single,
+ #[cfg(parallel_compiler)]
+ Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
}
}
- #[inline(always)]
- fn count(&self) -> usize {
- // `self.mask` is always one below the used shard count
- self.mask() + 1
+ #[inline]
+ pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
+ self.get_shard_by_index(get_shard_hash(hash))
+ }
+
+ #[inline]
+ pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+ match self {
+ Self::Single(single) => &single,
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => {
+ // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
+ unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+ }
+ }
}
/// The shard is selected by hashing `val` with `FxHasher`.
#[inline]
- pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
- self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
+ #[track_caller]
+ pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+ match self {
+ Self::Single(single) => {
+ // Synchronization is disabled, so use `lock_assume(Mode::NoSync)`, which is
+ // optimized for that case.
+
+ // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+ // `might_be_dyn_thread_safe` was also false.
+ unsafe { single.lock_assume(Mode::NoSync) }
+ }
+ #[cfg(parallel_compiler)]
+ Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+ }
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn lock_shard_by_hash(&self, hash: u64) -> LockGuard<'_, T> {
+ self.lock_shard_by_index(get_shard_hash(hash))
}
#[inline]
- pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
- self.get_shard_by_index(get_shard_hash(hash))
+ #[track_caller]
+ pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+ match self {
+ Self::Single(single) => {
+ // Synchronization is disabled, so use `lock_assume(Mode::NoSync)`, which is
+ // optimized for that case.
+
+ // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+ // `might_be_dyn_thread_safe` was also false.
+ unsafe { single.lock_assume(Mode::NoSync) }
+ }
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => {
+ // Synchronization is enabled, so use `lock_assume(Mode::Sync)`, which is
+ // optimized for that case.
+
+ // SAFETY (get_unchecked): The index gets ANDed with the shard mask, ensuring it is
+ // always inbounds.
+ // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
+ // the lock thus `might_be_dyn_thread_safe` was also true.
+ unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+ }
+ }
}
#[inline]
- pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
- // SAFETY: The index get ANDed with the mask, ensuring it is always inbounds.
- unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
+ pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
+ match self {
+ #[cfg(not(parallel_compiler))]
+ Self::Single(single) => iter::once(single.lock()),
+ #[cfg(parallel_compiler)]
+ Self::Single(single) => Either::Left(iter::once(single.lock())),
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
+ }
}
- pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
- (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
+ #[inline]
+ pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
+ match self {
+ #[cfg(not(parallel_compiler))]
+ Self::Single(single) => iter::once(single.try_lock()),
+ #[cfg(parallel_compiler)]
+ Self::Single(single) => Either::Left(iter::once(single.try_lock())),
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
+ }
}
+}
- pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
- (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
+#[inline]
+pub fn shards() -> usize {
+ #[cfg(parallel_compiler)]
+ if is_dyn_thread_safe() {
+ return SHARDS;
}
+
+ 1
}
pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
impl<K: Eq, V> ShardedHashMap<K, V> {
pub fn len(&self) -> usize {
- self.lock_shards().iter().map(|shard| shard.len()).sum()
+ self.lock_shards().map(|shard| shard.len()).sum()
}
}
@@ -105,7 +174,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(value);
- let mut shard = self.get_shard_by_hash(hash).lock();
+ let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
match entry {
@@ -125,7 +194,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(&value);
- let mut shard = self.get_shard_by_hash(hash).lock();
+ let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
match entry {
@@ -147,7 +216,7 @@ pub trait IntoPointer {
impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
let hash = make_hash(&value);
- let shard = self.get_shard_by_hash(hash).lock();
+ let shard = self.lock_shard_by_hash(hash);
let value = value.into_pointer();
shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
}
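A migration sketch for call sites, matching the three rewrites in the `ShardedHashMap` impls above; the key/value types are placeholders and the `Hash`/`FxHashMap` paths are assumed to already be in scope.

    use std::hash::Hash;

    // Before: let mut shard = map.get_shard_by_value(&key).lock();
    // After: shard selection and locking are a single call.
    fn insert_entry<K: Hash + Eq + Copy, V>(map: &Sharded<FxHashMap<K, V>>, key: K, value: V) {
        let mut shard = map.lock_shard_by_value(&key);
        shard.insert(key, value);
    }

    // `lock_shards` now yields an iterator of guards instead of a Vec,
    // mirroring the updated `len` implementation above.
    fn total_len<K: Eq, V>(map: &ShardedHashMap<K, V>) -> usize {
        map.lock_shards().map(|shard| shard.len()).sum()
    }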
diff --git a/compiler/rustc_data_structures/src/small_c_str.rs b/compiler/rustc_data_structures/src/small_c_str.rs
index 719e4e3d9..349fd7f97 100644
--- a/compiler/rustc_data_structures/src/small_c_str.rs
+++ b/compiler/rustc_data_structures/src/small_c_str.rs
@@ -79,3 +79,9 @@ impl<'a> FromIterator<&'a str> for SmallCStr {
Self { data }
}
}
+
+impl From<&ffi::CStr> for SmallCStr {
+ fn from(s: &ffi::CStr) -> Self {
+ Self { data: SmallVec::from_slice(s.to_bytes()) }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 25a082373..cca043ba0 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,7 +26,8 @@
//! | `AtomicU64` | `Cell<u64>` | `atomic::AtomicU64` |
//! | `AtomicUsize` | `Cell<usize>` | `atomic::AtomicUsize` |
//! | | | |
-//! | `Lock<T>` | `RefCell<T>` | `parking_lot::Mutex<T>` |
+//! | `Lock<T>` | `RefCell<T>` | `RefCell<T>` or |
+//! | | | `parking_lot::Mutex<T>` |
//! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` |
//! | `MTLock<T>` [^1] | `T` | `Lock<T>` |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>` |
@@ -43,11 +44,18 @@ pub use crate::marker::*;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
-use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+mod lock;
+pub use lock::{Lock, LockGuard, Mode};
mod worker_local;
pub use worker_local::{Registry, WorkerLocal};
+mod parallel;
+#[cfg(parallel_compiler)]
+pub use parallel::scope;
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard};
+
pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;
@@ -55,6 +63,9 @@ pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
mod vec;
+mod freeze;
+pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+
mod mode {
use super::Ordering;
use std::sync::atomic::AtomicU8;
@@ -75,6 +86,12 @@ mod mode {
}
}
+ // Whether thread safety might be enabled.
+ #[inline]
+ pub fn might_be_dyn_thread_safe() -> bool {
+ DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
+ }
+
// Only set by the `-Z threads` compile option
pub fn set_dyn_thread_safe_mode(mode: bool) {
let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@@ -94,14 +111,15 @@ pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
cfg_if! {
if #[cfg(not(parallel_compiler))] {
+ use std::ops::Add;
+ use std::cell::Cell;
+
pub unsafe auto trait Send {}
pub unsafe auto trait Sync {}
unsafe impl<T> Send for T {}
unsafe impl<T> Sync for T {}
- use std::ops::Add;
-
/// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
/// It has explicit ordering arguments and is only intended for use with
/// the native atomic types.
@@ -182,88 +200,17 @@ cfg_if! {
pub type AtomicU32 = Atomic<u32>;
pub type AtomicU64 = Atomic<u64>;
- pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
- where A: FnOnce() -> RA,
- B: FnOnce() -> RB
- {
- (oper_a(), oper_b())
- }
-
- #[macro_export]
- macro_rules! parallel {
- ($($blocks:block),*) => {
- // We catch panics here ensuring that all the blocks execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- $(
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $blocks)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- )*
- if let Some(panic) = panic {
- ::std::panic::resume_unwind(panic);
- }
- }
- }
-
- pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
- // We catch panics here ensuring that all the loop iterations execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- t.into_iter().for_each(|i| {
- if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- });
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- }
-
- pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
- t: T,
- mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
- ) -> C {
- // We catch panics here ensuring that all the loop iterations execute.
- let mut panic = None;
- let r = t.into_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- if panic.is_none() {
- panic = Some(p);
- }
- None
- }
- }
- }).collect();
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- r
- }
-
pub use std::rc::Rc as Lrc;
pub use std::rc::Weak as Weak;
pub use std::cell::Ref as ReadGuard;
pub use std::cell::Ref as MappedReadGuard;
pub use std::cell::RefMut as WriteGuard;
pub use std::cell::RefMut as MappedWriteGuard;
- pub use std::cell::RefMut as LockGuard;
pub use std::cell::RefMut as MappedLockGuard;
- pub use std::cell::OnceCell;
+ pub use std::cell::OnceCell as OnceLock;
use std::cell::RefCell as InnerRwLock;
- use std::cell::RefCell as InnerLock;
-
- use std::cell::Cell;
pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
@@ -313,10 +260,9 @@ cfg_if! {
pub use parking_lot::RwLockWriteGuard as WriteGuard;
pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
- pub use parking_lot::MutexGuard as LockGuard;
pub use parking_lot::MappedMutexGuard as MappedLockGuard;
- pub use std::sync::OnceLock as OnceCell;
+ pub use std::sync::OnceLock;
pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
@@ -355,171 +301,10 @@ cfg_if! {
}
}
- use parking_lot::Mutex as InnerLock;
use parking_lot::RwLock as InnerRwLock;
use std::thread;
- #[inline]
- pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
- where
- A: FnOnce() -> RA + DynSend,
- B: FnOnce() -> RB + DynSend,
- {
- if mode::is_dyn_thread_safe() {
- let oper_a = FromDyn::from(oper_a);
- let oper_b = FromDyn::from(oper_b);
- let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
- (a.into_inner(), b.into_inner())
- } else {
- (oper_a(), oper_b())
- }
- }
-
- // This function only works when `mode::is_dyn_thread_safe()`.
- pub fn scope<'scope, OP, R>(op: OP) -> R
- where
- OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
- R: DynSend,
- {
- let op = FromDyn::from(op);
- rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
- }
-
- /// Runs a list of blocks in parallel. The first block is executed immediately on
- /// the current thread. Use that for the longest running block.
- #[macro_export]
- macro_rules! parallel {
- (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
- parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
- };
- (impl $fblock:block [$($blocks:expr,)*] []) => {
- ::rustc_data_structures::sync::scope(|s| {
- $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
- s.spawn(move |_| block.into_inner()());)*
- (|| $fblock)();
- });
- };
- ($fblock:block, $($blocks:block),*) => {
- if rustc_data_structures::sync::is_dyn_thread_safe() {
- // Reverse the order of the later blocks since Rayon executes them in reverse order
- // when using a single thread. This ensures the execution order matches that
- // of a single threaded rustc.
- parallel!(impl $fblock [] [$($blocks),*]);
- } else {
- // We catch panics here ensuring that all the blocks execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $fblock)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- $(
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $blocks)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- )*
- if let Some(panic) = panic {
- ::std::panic::resume_unwind(panic);
- }
- }
- };
- }
-
- use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
-
- pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
- t: T,
- for_each: impl Fn(I) + DynSync + DynSend
- ) {
- if mode::is_dyn_thread_safe() {
- let for_each = FromDyn::from(for_each);
- let panic: Lock<Option<_>> = Lock::new(None);
- t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- let mut l = panic.lock();
- if l.is_none() {
- *l = Some(p)
- }
- });
-
- if let Some(panic) = panic.into_inner() {
- resume_unwind(panic);
- }
- } else {
- // We catch panics here ensuring that all the loop iterations execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- t.into_iter().for_each(|i| {
- if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- });
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- }
- }
-
- pub fn par_map<
- I,
- T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
- R: std::marker::Send,
- C: FromIterator<R> + FromParallelIterator<R>
- >(
- t: T,
- map: impl Fn(I) -> R + DynSync + DynSend
- ) -> C {
- if mode::is_dyn_thread_safe() {
- let panic: Lock<Option<_>> = Lock::new(None);
- let map = FromDyn::from(map);
- // We catch panics here ensuring that all the loop iterations execute.
- let r = t.into_par_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- let mut l = panic.lock();
- if l.is_none() {
- *l = Some(p);
- }
- None
- },
- }
- }).collect();
-
- if let Some(panic) = panic.into_inner() {
- resume_unwind(panic);
- }
- r
- } else {
- // We catch panics here ensuring that all the loop iterations execute.
- let mut panic = None;
- let r = t.into_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- if panic.is_none() {
- panic = Some(p);
- }
- None
- }
- }
- }).collect();
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- r
- }
- }
-
/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread
const ERROR_CHECKING: bool = false;
@@ -542,81 +327,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
}
}
-#[derive(Debug)]
-pub struct Lock<T>(InnerLock<T>);
-
-impl<T> Lock<T> {
- #[inline(always)]
- pub fn new(inner: T) -> Self {
- Lock(InnerLock::new(inner))
- }
-
- #[inline(always)]
- pub fn into_inner(self) -> T {
- self.0.into_inner()
- }
-
- #[inline(always)]
- pub fn get_mut(&mut self) -> &mut T {
- self.0.get_mut()
- }
-
- #[cfg(parallel_compiler)]
- #[inline(always)]
- pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
- self.0.try_lock()
- }
-
- #[cfg(not(parallel_compiler))]
- #[inline(always)]
- pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
- self.0.try_borrow_mut().ok()
- }
-
- #[cfg(parallel_compiler)]
- #[inline(always)]
- #[track_caller]
- pub fn lock(&self) -> LockGuard<'_, T> {
- if ERROR_CHECKING {
- self.0.try_lock().expect("lock was already held")
- } else {
- self.0.lock()
- }
- }
-
- #[cfg(not(parallel_compiler))]
- #[inline(always)]
- #[track_caller]
- pub fn lock(&self) -> LockGuard<'_, T> {
- self.0.borrow_mut()
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
- f(&mut *self.lock())
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn borrow(&self) -> LockGuard<'_, T> {
- self.lock()
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn borrow_mut(&self) -> LockGuard<'_, T> {
- self.lock()
- }
-}
-
-impl<T: Default> Default for Lock<T> {
- #[inline]
- fn default() -> Self {
- Lock::new(T::default())
- }
-}
-
#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);
diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs
new file mode 100644
index 000000000..466c44f59
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/freeze.rs
@@ -0,0 +1,200 @@
+use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard};
+#[cfg(parallel_compiler)]
+use crate::sync::{DynSend, DynSync};
+use std::{
+ cell::UnsafeCell,
+ intrinsics::likely,
+ marker::PhantomData,
+ ops::{Deref, DerefMut},
+ ptr::NonNull,
+ sync::atomic::Ordering,
+};
+
+/// A type which allows mutation using a lock until
+/// the value is frozen and can be accessed lock-free.
+///
+/// Unlike `RwLock`, it can be used to prevent mutation past a point.
+#[derive(Default)]
+pub struct FreezeLock<T> {
+ data: UnsafeCell<T>,
+ frozen: AtomicBool,
+
+ /// This lock protects writes to the `data` and `frozen` fields.
+ lock: RwLock<()>,
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {}
+
+impl<T> FreezeLock<T> {
+ #[inline]
+ pub fn new(value: T) -> Self {
+ Self::with(value, false)
+ }
+
+ #[inline]
+ pub fn frozen(value: T) -> Self {
+ Self::with(value, true)
+ }
+
+ #[inline]
+ pub fn with(value: T, frozen: bool) -> Self {
+ Self {
+ data: UnsafeCell::new(value),
+ frozen: AtomicBool::new(frozen),
+ lock: RwLock::new(()),
+ }
+ }
+
+ /// Clones the inner value along with the frozen state.
+ #[inline]
+ pub fn clone(&self) -> Self
+ where
+ T: Clone,
+ {
+ let lock = self.read();
+ Self::with(lock.clone(), self.is_frozen())
+ }
+
+ #[inline]
+ pub fn is_frozen(&self) -> bool {
+ self.frozen.load(Ordering::Acquire)
+ }
+
+ /// Get the inner value if frozen.
+ #[inline]
+ pub fn get(&self) -> Option<&T> {
+ if likely(self.frozen.load(Ordering::Acquire)) {
+ // SAFETY: This is frozen so the data cannot be modified.
+ unsafe { Some(&*self.data.get()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn read(&self) -> FreezeReadGuard<'_, T> {
+ FreezeReadGuard {
+ _lock_guard: if self.frozen.load(Ordering::Acquire) {
+ None
+ } else {
+ Some(self.lock.read())
+ },
+ data: unsafe { NonNull::new_unchecked(self.data.get()) },
+ }
+ }
+
+ #[inline]
+ pub fn borrow(&self) -> FreezeReadGuard<'_, T> {
+ self.read()
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn write(&self) -> FreezeWriteGuard<'_, T> {
+ self.try_write().expect("still mutable")
+ }
+
+ #[inline]
+ pub fn try_write(&self) -> Option<FreezeWriteGuard<'_, T>> {
+ let _lock_guard = self.lock.write();
+ // Use relaxed ordering since we're in the write lock.
+ if self.frozen.load(Ordering::Relaxed) {
+ None
+ } else {
+ Some(FreezeWriteGuard {
+ _lock_guard,
+ data: unsafe { NonNull::new_unchecked(self.data.get()) },
+ frozen: &self.frozen,
+ marker: PhantomData,
+ })
+ }
+ }
+
+ #[inline]
+ pub fn freeze(&self) -> &T {
+ if !self.frozen.load(Ordering::Acquire) {
+ // Get the lock to ensure no concurrent writes and that we release the latest write.
+ let _lock = self.lock.write();
+ self.frozen.store(true, Ordering::Release);
+ }
+
+ // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.get() }
+ }
+}
+
+/// A guard holding shared access to a `FreezeLock` which is in a locked state or frozen.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeReadGuard<'a, T: ?Sized> {
+ _lock_guard: Option<ReadGuard<'a, ()>>,
+ data: NonNull<T>,
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeReadGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: If the lock is not frozen, `_lock_guard` holds the lock to the `UnsafeCell` so
+ // this has shared access until the `FreezeReadGuard` is dropped. If the lock is frozen,
+ // the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized> FreezeReadGuard<'a, T> {
+ #[inline]
+ pub fn map<U: ?Sized>(this: Self, f: impl FnOnce(&T) -> &U) -> FreezeReadGuard<'a, U> {
+ FreezeReadGuard { data: NonNull::from(f(&*this)), _lock_guard: this._lock_guard }
+ }
+}
+
+/// A guard holding mutable access to a `FreezeLock` which is in a locked state or frozen.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeWriteGuard<'a, T: ?Sized> {
+ _lock_guard: WriteGuard<'a, ()>,
+ frozen: &'a AtomicBool,
+ data: NonNull<T>,
+ marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> FreezeWriteGuard<'a, T> {
+ pub fn freeze(self) -> &'a T {
+ self.frozen.store(true, Ordering::Release);
+
+ // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized> FreezeWriteGuard<'a, T> {
+ #[inline]
+ pub fn map<U: ?Sized>(
+ mut this: Self,
+ f: impl FnOnce(&mut T) -> &mut U,
+ ) -> FreezeWriteGuard<'a, U> {
+ FreezeWriteGuard {
+ data: NonNull::from(f(&mut *this)),
+ _lock_guard: this._lock_guard,
+ frozen: this.frozen,
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeWriteGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has shared access.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized + 'a> DerefMut for FreezeWriteGuard<'a, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has mutable access.
+ unsafe { &mut *self.data.as_ptr() }
+ }
+}
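An illustrative use of the new `FreezeLock`, assuming nothing beyond the API defined in this file: mutate under the write lock, then freeze for lock-free shared reads.

    fn example() {
        let names: FreezeLock<Vec<String>> = FreezeLock::new(Vec::new());

        names.write().push("core".to_owned()); // locked mutation while still mutable
        let frozen: &Vec<String> = names.freeze(); // from here on, reads need no lock
        assert!(names.try_write().is_none()); // ...and further writes are rejected
        assert_eq!(frozen.len(), 1);
    }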
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
new file mode 100644
index 000000000..339aebbf8
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -0,0 +1,275 @@
+//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
+//!
+//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
+
+#![allow(dead_code)]
+
+use std::fmt;
+
+#[cfg(parallel_compiler)]
+pub use maybe_sync::*;
+#[cfg(not(parallel_compiler))]
+pub use no_sync::*;
+
+#[derive(Clone, Copy, PartialEq)]
+pub enum Mode {
+ NoSync,
+ Sync,
+}
+
+mod maybe_sync {
+ use super::Mode;
+ use crate::sync::mode;
+ #[cfg(parallel_compiler)]
+ use crate::sync::{DynSend, DynSync};
+ use parking_lot::lock_api::RawMutex as _;
+ use parking_lot::RawMutex;
+ use std::cell::Cell;
+ use std::cell::UnsafeCell;
+ use std::intrinsics::unlikely;
+ use std::marker::PhantomData;
+ use std::mem::ManuallyDrop;
+ use std::ops::{Deref, DerefMut};
+
+ /// A guard holding mutable access to a `Lock` which is in a locked state.
+ #[must_use = "if unused the Lock will immediately unlock"]
+ pub struct LockGuard<'a, T> {
+ lock: &'a Lock<T>,
+ marker: PhantomData<&'a mut T>,
+
+ /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it
+ /// to the original lock operation.
+ mode: Mode,
+ }
+
+ impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: We have shared access to the mutable access owned by this type,
+ // so we can give out a shared reference.
+ unsafe { &*self.lock.data.get() }
+ }
+ }
+
+ impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+ unsafe { &mut *self.lock.data.get() }
+ }
+ }
+
+ impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
+ // with the `lock.mode` state. This means we access the right union fields.
+ match self.mode {
+ Mode::NoSync => {
+ let cell = unsafe { &self.lock.mode_union.no_sync };
+ debug_assert_eq!(cell.get(), true);
+ cell.set(false);
+ }
+ // SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
+ Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
+ }
+ }
+ }
+
+ union ModeUnion {
+ /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
+ no_sync: ManuallyDrop<Cell<bool>>,
+
+ /// A lock implementation that's only used if `Lock.mode` is `Sync`.
+ sync: ManuallyDrop<RawMutex>,
+ }
+
+ /// The value representing a locked state for the `Cell`.
+ const LOCKED: bool = true;
+
+ /// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+ /// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+ pub struct Lock<T> {
+ /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
+ /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
+ /// This is set on initialization and never changed.
+ mode: Mode,
+
+ mode_union: ModeUnion,
+ data: UnsafeCell<T>,
+ }
+
+ impl<T> Lock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
+ // Create the lock with synchronization enabled using the `RawMutex` type.
+ (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
+ } else {
+ // Create the lock with synchronization disabled.
+ (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
+ };
+ Lock { mode, mode_union, data: UnsafeCell::new(inner) }
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.data.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.data.get_mut()
+ }
+
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ let mode = self.mode;
+ // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
+ match mode {
+ Mode::NoSync => {
+ let cell = unsafe { &self.mode_union.no_sync };
+ let was_unlocked = cell.get() != LOCKED;
+ if was_unlocked {
+ cell.set(LOCKED);
+ }
+ was_unlocked
+ }
+ Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
+ }
+ .then(|| LockGuard { lock: self, marker: PhantomData, mode })
+ }
+
+ /// This acquires the lock assuming synchronization is in a specific mode.
+ ///
+ /// # Safety
+ /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
+ /// true on lock creation.
+ #[inline(always)]
+ #[track_caller]
+ pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
+ #[inline(never)]
+ #[track_caller]
+ #[cold]
+ fn lock_held() -> ! {
+ panic!("lock was already held")
+ }
+
+ // SAFETY: This is safe since the union fields are used in accordance with `mode`
+ // which also must match `self.mode` due to the safety precondition.
+ unsafe {
+ match mode {
+ Mode::NoSync => {
+ if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
+ lock_held()
+ }
+ }
+ Mode::Sync => self.mode_union.sync.lock(),
+ }
+ }
+ LockGuard { lock: self, marker: PhantomData, mode }
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ unsafe { self.lock_assume(self.mode) }
+ }
+ }
+
+ #[cfg(parallel_compiler)]
+ unsafe impl<T: DynSend> DynSend for Lock<T> {}
+ #[cfg(parallel_compiler)]
+ unsafe impl<T: DynSend> DynSync for Lock<T> {}
+}
+
+mod no_sync {
+ use super::Mode;
+ use std::cell::RefCell;
+
+ pub use std::cell::RefMut as LockGuard;
+
+ pub struct Lock<T>(RefCell<T>);
+
+ impl<T> Lock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ Lock(RefCell::new(inner))
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.0.get_mut()
+ }
+
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ self.0.try_borrow_mut().ok()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ // This is unsafe to match the API for the `parallel_compiler` case.
+ pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+ }
+}
+
+impl<T> Lock<T> {
+ #[inline(always)]
+ #[track_caller]
+ pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+ f(&mut *self.lock())
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn borrow(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+}
+
+impl<T: Default> Default for Lock<T> {
+ #[inline]
+ fn default() -> Self {
+ Lock::new(T::default())
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Lock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.try_lock() {
+ Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
+ None => {
+ struct LockedPlaceholder;
+ impl fmt::Debug for LockedPlaceholder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("<locked>")
+ }
+ }
+
+ f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
+ }
+ }
+ }
+}
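A minimal sketch of the safe entry points shared by both implementations above; `lock_assume` is the unsafe fast path reserved for callers such as `Sharded` that already know the mode.

    fn example() {
        let counter = Lock::new(0u32);
        *counter.lock() += 1; // dispatches on the mode chosen at construction
        counter.with_lock(|c| *c += 1); // convenience wrapper around `lock`
        assert_eq!(counter.into_inner(), 2);
    }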
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
new file mode 100644
index 000000000..1944ddfb7
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -0,0 +1,188 @@
+//! This module defines parallel operations that are implemented in
+//! one way for the serial compiler, and another way for the parallel compiler.
+
+#![allow(dead_code)]
+
+use parking_lot::Mutex;
+use std::any::Any;
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+#[cfg(not(parallel_compiler))]
+pub use disabled::*;
+#[cfg(parallel_compiler)]
+pub use enabled::*;
+
+/// A guard used to hold panics that occur during a parallel section, to be unwound later.
+/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
+/// hiding other errors, by ensuring that everything in the section has finished executing before
+/// unwinding continues. It's also used for the non-parallel code to ensure error message
+/// output matches the parallel compiler, for testing purposes.
+pub struct ParallelGuard {
+ panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
+}
+
+impl ParallelGuard {
+ pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
+ catch_unwind(AssertUnwindSafe(f))
+ .map_err(|err| {
+ *self.panic.lock() = Some(err);
+ })
+ .ok()
+ }
+}
+
+/// This gives access to a fresh parallel guard in the closure and will unwind any panics
+/// caught in it after the closure returns.
+#[inline]
+pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
+ let guard = ParallelGuard { panic: Mutex::new(None) };
+ let ret = f(&guard);
+ if let Some(panic) = guard.panic.into_inner() {
+ resume_unwind(panic);
+ }
+ ret
+}
+
+mod disabled {
+ use crate::sync::parallel_guard;
+
+ #[macro_export]
+ #[cfg(not(parallel_compiler))]
+ macro_rules! parallel {
+ ($($blocks:block),*) => {{
+ $crate::sync::parallel_guard(|guard| {
+ $(guard.run(|| $blocks);)*
+ });
+ }}
+ }
+
+ pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+ where
+ A: FnOnce() -> RA,
+ B: FnOnce() -> RB,
+ {
+ let (a, b) = parallel_guard(|guard| {
+ let a = guard.run(oper_a);
+ let b = guard.run(oper_b);
+ (a, b)
+ });
+ (a.unwrap(), b.unwrap())
+ }
+
+ pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item)) {
+ parallel_guard(|guard| {
+ t.into_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ })
+ }
+
+ pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
+ t: T,
+ mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
+ ) -> C {
+ parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
+ }
+}
+
+#[cfg(parallel_compiler)]
+mod enabled {
+ use crate::sync::{mode, parallel_guard, DynSend, DynSync, FromDyn};
+
+ /// Runs a list of blocks in parallel. The first block is executed immediately on
+ /// the current thread. Use that for the longest running block.
+ #[macro_export]
+ macro_rules! parallel {
+ (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
+ parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
+ };
+ (impl $fblock:block [$($blocks:expr,)*] []) => {
+ ::rustc_data_structures::sync::scope(|s| {
+ $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
+ s.spawn(move |_| block.into_inner()());)*
+ (|| $fblock)();
+ });
+ };
+ ($fblock:block, $($blocks:block),*) => {
+ if rustc_data_structures::sync::is_dyn_thread_safe() {
+ // Reverse the order of the later blocks since Rayon executes them in reverse order
+ // when using a single thread. This ensures the execution order matches that
+ // of a single threaded rustc.
+ parallel!(impl $fblock [] [$($blocks),*]);
+ } else {
+ $crate::sync::parallel_guard(|guard| {
+ guard.run(|| $fblock);
+ $(guard.run(|| $blocks);)*
+ });
+ }
+ };
+ }
+
+ // This function only works when `mode::is_dyn_thread_safe()`.
+ pub fn scope<'scope, OP, R>(op: OP) -> R
+ where
+ OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
+ R: DynSend,
+ {
+ let op = FromDyn::from(op);
+ rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+ }
+
+ #[inline]
+ pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
+ where
+ A: FnOnce() -> RA + DynSend,
+ B: FnOnce() -> RB + DynSend,
+ {
+ if mode::is_dyn_thread_safe() {
+ let oper_a = FromDyn::from(oper_a);
+ let oper_b = FromDyn::from(oper_b);
+ let (a, b) = rayon::join(
+ move || FromDyn::from(oper_a.into_inner()()),
+ move || FromDyn::from(oper_b.into_inner()()),
+ );
+ (a.into_inner(), b.into_inner())
+ } else {
+ super::disabled::join(oper_a, oper_b)
+ }
+ }
+
+ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
+
+ pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
+ t: T,
+ for_each: impl Fn(I) + DynSync + DynSend,
+ ) {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let for_each = FromDyn::from(for_each);
+ t.into_par_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ } else {
+ t.into_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ }
+ });
+ }
+
+ pub fn par_map<
+ I,
+ T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
+ R: std::marker::Send,
+ C: FromIterator<R> + FromParallelIterator<R>,
+ >(
+ t: T,
+ map: impl Fn(I) -> R + DynSync + DynSend,
+ ) -> C {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let map = FromDyn::from(map);
+ t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
+ } else {
+ t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
+ }
+ })
+ }
+}
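A rough sketch of the shared call sites; serial semantics are shown, and the parallel build additionally requires the `DynSend`/`DynSync` bounds visible in the `enabled` module.

    fn example() -> (u32, u32) {
        parallel_guard(|guard| {
            // Panics inside `run` are stashed and re-raised after the whole section finishes.
            let _ = guard.run(|| 1 + 1);
        });
        join(|| 2 + 2, || 3 + 3)
    }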
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 8c84daf4f..ffafdba13 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -1,4 +1,4 @@
-use crate::sync::Lock;
+use parking_lot::Mutex;
use std::cell::Cell;
use std::cell::OnceCell;
use std::ops::Deref;
@@ -6,7 +6,7 @@ use std::ptr;
use std::sync::Arc;
#[cfg(parallel_compiler)]
-use {crate::cold_path, crate::sync::CacheAligned};
+use {crate::outline, crate::sync::CacheAligned};
/// A pointer to the `RegistryData` which uniquely identifies a registry.
/// This identifier can be reused if the registry gets freed.
@@ -25,17 +25,13 @@ impl RegistryId {
fn verify(self) -> usize {
let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
- if id == self {
- index
- } else {
- cold_path(|| panic!("Unable to verify registry association"))
- }
+ if id == self { index } else { outline(|| panic!("Unable to verify registry association")) }
}
}
struct RegistryData {
thread_limit: usize,
- threads: Lock<usize>,
+ threads: Mutex<usize>,
}
/// Represents a list of threads which can access worker locals.
@@ -65,7 +61,7 @@ thread_local! {
impl Registry {
/// Creates a registry which can hold up to `thread_limit` threads.
pub fn new(thread_limit: usize) -> Self {
- Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+ Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
}
/// Gets the registry associated with the current thread. Panics if there's no such registry.
@@ -171,3 +167,9 @@ impl<T> Deref for WorkerLocal<T> {
unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
}
}
+
+impl<T: Default> Default for WorkerLocal<T> {
+ fn default() -> Self {
+ WorkerLocal::new(|_| T::default())
+ }
+}
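With the new `Default` impl, containing types can derive `Default` instead of spelling out `WorkerLocal::new(|_| ...)`; the struct below is hypothetical, and in the parallel build a thread registry must already be active when the value is created.

    #[derive(Default)]
    struct PerThreadCaches {
        strings: WorkerLocal<Vec<String>>,
    }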
diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs
index 736877bde..65c7aed3f 100644
--- a/compiler/rustc_driver_impl/src/lib.rs
+++ b/compiler/rustc_driver_impl/src/lib.rs
@@ -85,6 +85,15 @@ pub mod pretty;
#[macro_use]
mod print;
mod session_diagnostics;
+#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
+mod signal_handler;
+
+#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
+mod signal_handler {
+ /// On platforms which don't support our signal handler's requirements,
+ /// simply use the default signal handler provided by std.
+ pub(super) fn install() {}
+}
use crate::session_diagnostics::{
RLinkEmptyVersionNumber, RLinkEncodingVersionMismatch, RLinkRustcVersionMismatch,
@@ -140,12 +149,6 @@ pub const EXIT_FAILURE: i32 = 1;
pub const DEFAULT_BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust/issues/new\
?labels=C-bug%2C+I-ICE%2C+T-compiler&template=ice.md";
-const ICE_REPORT_COMPILER_FLAGS: &[&str] = &["-Z", "-C", "--crate-type"];
-
-const ICE_REPORT_COMPILER_FLAGS_EXCLUDE: &[&str] = &["metadata", "extra-filename"];
-
-const ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE: &[&str] = &["incremental"];
-
pub fn abort_on_err<T>(result: Result<T, ErrorGuaranteed>, sess: &Session) -> T {
match result {
Err(..) => {
@@ -159,9 +162,10 @@ pub fn abort_on_err<T>(result: Result<T, ErrorGuaranteed>, sess: &Session) -> T
pub trait Callbacks {
/// Called before creating the compiler instance
fn config(&mut self, _config: &mut interface::Config) {}
- /// Called after parsing. Return value instructs the compiler whether to
+ /// Called after parsing the crate root. Submodules are not yet parsed when
+ /// this callback is called. Return value instructs the compiler whether to
/// continue the compilation afterwards (defaults to `Compilation::Continue`)
- fn after_parsing<'tcx>(
+ fn after_crate_root_parsing<'tcx>(
&mut self,
_compiler: &interface::Compiler,
_queries: &'tcx Queries<'tcx>,
@@ -181,7 +185,6 @@ pub trait Callbacks {
/// continue the compilation afterwards (defaults to `Compilation::Continue`)
fn after_analysis<'tcx>(
&mut self,
- _handler: &EarlyErrorHandler,
_compiler: &interface::Compiler,
_queries: &'tcx Queries<'tcx>,
) -> Compilation {
@@ -310,6 +313,7 @@ fn run_compiler(
override_queries: None,
make_codegen_backend,
registry: diagnostics_registry(),
+ expanded_args: args,
};
match make_input(&early_error_handler, &matches.free) {
@@ -403,7 +407,7 @@ fn run_compiler(
return early_exit();
}
- if callbacks.after_parsing(compiler, queries) == Compilation::Stop {
+ if callbacks.after_crate_root_parsing(compiler, queries) == Compilation::Stop {
return early_exit();
}
@@ -441,7 +445,7 @@ fn run_compiler(
queries.global_ctxt()?.enter(|tcx| tcx.analysis(()))?;
- if callbacks.after_analysis(&handler, compiler, queries) == Compilation::Stop {
+ if callbacks.after_analysis(compiler, queries) == Compilation::Stop {
return early_exit();
}
@@ -696,12 +700,14 @@ pub fn list_metadata(
sess: &Session,
metadata_loader: &dyn MetadataLoader,
) -> Compilation {
- if sess.opts.unstable_opts.ls {
+ let ls_kinds = &sess.opts.unstable_opts.ls;
+ if !ls_kinds.is_empty() {
match sess.io.input {
Input::File(ref ifile) => {
let path = &(*ifile);
let mut v = Vec::new();
- locator::list_file_metadata(&sess.target, path, metadata_loader, &mut v).unwrap();
+ locator::list_file_metadata(&sess.target, path, metadata_loader, &mut v, ls_kinds)
+ .unwrap();
safe_println!("{}", String::from_utf8(v).unwrap());
}
Input::Str { .. } => {
@@ -858,11 +864,9 @@ fn print_crate_info(
use rustc_target::spec::current_apple_deployment_target;
if sess.target.is_like_osx {
- println_info!(
- "deployment_target={}",
- current_apple_deployment_target(&sess.target)
- .expect("unknown Apple target OS")
- )
+ let (major, minor) = current_apple_deployment_target(&sess.target)
+ .expect("unknown Apple target OS");
+ println_info!("deployment_target={}", format!("{major}.{minor}"))
} else {
handler
.early_error("only Apple targets currently support deployment version info")
@@ -1250,47 +1254,6 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
}
}
-/// Gets a list of extra command-line flags provided by the user, as strings.
-///
-/// This function is used during ICEs to show more information useful for
-/// debugging, since some ICEs only happens with non-default compiler flags
-/// (and the users don't always report them).
-fn extra_compiler_flags() -> Option<(Vec<String>, bool)> {
- let mut args = env::args_os().map(|arg| arg.to_string_lossy().to_string()).peekable();
-
- let mut result = Vec::new();
- let mut excluded_cargo_defaults = false;
- while let Some(arg) = args.next() {
- if let Some(a) = ICE_REPORT_COMPILER_FLAGS.iter().find(|a| arg.starts_with(*a)) {
- let content = if arg.len() == a.len() {
- // A space-separated option, like `-C incremental=foo` or `--crate-type rlib`
- match args.next() {
- Some(arg) => arg.to_string(),
- None => continue,
- }
- } else if arg.get(a.len()..a.len() + 1) == Some("=") {
- // An equals option, like `--crate-type=rlib`
- arg[a.len() + 1..].to_string()
- } else {
- // A non-space option, like `-Cincremental=foo`
- arg[a.len()..].to_string()
- };
- let option = content.split_once('=').map(|s| s.0).unwrap_or(&content);
- if ICE_REPORT_COMPILER_FLAGS_EXCLUDE.iter().any(|exc| option == *exc) {
- excluded_cargo_defaults = true;
- } else {
- result.push(a.to_string());
- match ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.iter().find(|s| option == **s) {
- Some(s) => result.push(format!("{s}=[REDACTED]")),
- None => result.push(content),
- }
- }
- }
- }
-
- if !result.is_empty() { Some((result, excluded_cargo_defaults)) } else { None }
-}
-
/// Runs a closure and catches unwinds triggered by fatal errors.
///
/// The compiler currently unwinds with a special sentinel value to abort
@@ -1477,7 +1440,7 @@ pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str, extra_info:
None
};
- if let Some((flags, excluded_cargo_defaults)) = extra_compiler_flags() {
+ if let Some((flags, excluded_cargo_defaults)) = rustc_session::utils::extra_compiler_flags() {
handler.emit_note(session_diagnostics::IceFlags { flags: flags.join(" ") });
if excluded_cargo_defaults {
handler.emit_note(session_diagnostics::IceExcludeCargoDefaults);
@@ -1517,72 +1480,6 @@ pub fn init_env_logger(handler: &EarlyErrorHandler, env: &str) {
}
}
-#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
-mod signal_handler {
- extern "C" {
- fn backtrace_symbols_fd(
- buffer: *const *mut libc::c_void,
- size: libc::c_int,
- fd: libc::c_int,
- );
- }
-
- extern "C" fn print_stack_trace(_: libc::c_int) {
- const MAX_FRAMES: usize = 256;
- static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] =
- [std::ptr::null_mut(); MAX_FRAMES];
- unsafe {
- let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32);
- if depth == 0 {
- return;
- }
- backtrace_symbols_fd(STACK_TRACE.as_ptr(), depth, 2);
- }
- }
-
- /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
- /// process, print a stack trace and then exit.
- pub(super) fn install() {
- use std::alloc::{alloc, Layout};
-
- unsafe {
- let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
- let mut alt_stack: libc::stack_t = std::mem::zeroed();
- alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
- alt_stack.ss_size = alt_stack_size;
- libc::sigaltstack(&alt_stack, std::ptr::null_mut());
-
- let mut sa: libc::sigaction = std::mem::zeroed();
- sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
- sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
- libc::sigemptyset(&mut sa.sa_mask);
- libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut());
- }
- }
-
- /// Modern kernels on modern hardware can have dynamic signal stack sizes.
- #[cfg(any(target_os = "linux", target_os = "android"))]
- fn min_sigstack_size() -> usize {
- const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
- let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
- // If getauxval couldn't find the entry, it returns 0,
- // so take the higher of the "constant" and auxval.
- // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
- libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
- }
-
- /// Not all OS support hardware where this is needed.
- #[cfg(not(any(target_os = "linux", target_os = "android")))]
- fn min_sigstack_size() -> usize {
- libc::MINSIGSTKSZ
- }
-}
-
-#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
-mod signal_handler {
- pub(super) fn install() {}
-}
-
pub fn main() -> ! {
let start_time = Instant::now();
let start_rss = get_resident_set_size();
diff --git a/compiler/rustc_driver_impl/src/signal_handler.rs b/compiler/rustc_driver_impl/src/signal_handler.rs
new file mode 100644
index 000000000..deca10822
--- /dev/null
+++ b/compiler/rustc_driver_impl/src/signal_handler.rs
@@ -0,0 +1,142 @@
+//! Signal handler for rustc
+//! Primarily used to extract a backtrace from stack overflow
+
+use std::alloc::{alloc, Layout};
+use std::{fmt, mem, ptr};
+
+extern "C" {
+ fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
+}
+
+fn backtrace_stderr(buffer: &[*mut libc::c_void]) {
+ let size = buffer.len().try_into().unwrap_or_default();
+ unsafe { backtrace_symbols_fd(buffer.as_ptr(), size, libc::STDERR_FILENO) };
+}
+
+/// Unbuffered, unsynchronized writer to stderr.
+///
+/// Only acceptable because everything will end soon anyways.
+struct RawStderr(());
+
+impl fmt::Write for RawStderr {
+ fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
+ let ret = unsafe { libc::write(libc::STDERR_FILENO, s.as_ptr().cast(), s.len()) };
+ if ret == -1 { Err(fmt::Error) } else { Ok(()) }
+ }
+}
+
+/// We don't really care how many bytes we actually get out. SIGSEGV comes for our head.
+/// Splash stderr with letters of our own blood to warn our friends about the monster.
+macro raw_errln($tokens:tt) {
+ let _ = ::core::fmt::Write::write_fmt(&mut RawStderr(()), format_args!($tokens));
+ let _ = ::core::fmt::Write::write_char(&mut RawStderr(()), '\n');
+}
+
+/// Signal handler installed for SIGSEGV
+extern "C" fn print_stack_trace(_: libc::c_int) {
+ const MAX_FRAMES: usize = 256;
+ // Reserve data segment so we don't have to malloc in a signal handler, which might fail
+ // in incredibly undesirable and unexpected ways due to e.g. the allocator deadlocking
+ static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] = [ptr::null_mut(); MAX_FRAMES];
+ let stack = unsafe {
+ // Collect return addresses
+ let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32);
+ if depth == 0 {
+ return;
+ }
+ &STACK_TRACE.as_slice()[0..(depth as _)]
+ };
+
+ // Just a stack trace is cryptic. Explain what we're doing.
+ raw_errln!("error: rustc interrupted by SIGSEGV, printing backtrace\n");
+ let mut written = 1;
+ let mut consumed = 0;
+ // Begin elaborating return addrs into symbols and writing them directly to stderr
+ // Most backtraces are stack overflow, most stack overflows are from recursion
+ // Check for cycles before writing 250 lines of the same ~5 symbols
+ let cycled = |(runner, walker)| runner == walker;
+ let mut cyclic = false;
+ if let Some(period) = stack.iter().skip(1).step_by(2).zip(stack).position(cycled) {
+ let period = period.saturating_add(1); // avoid "what if wrapped?" branches
+ let Some(offset) = stack.iter().skip(period).zip(stack).position(cycled) else {
+ // impossible.
+ return;
+ };
+
+ // Count matching trace slices, else we could miscount "biphasic cycles"
+ // with the same period + loop entry but a different inner loop
+ let next_cycle = stack[offset..].chunks_exact(period).skip(1);
+ let cycles = 1 + next_cycle
+ .zip(stack[offset..].chunks_exact(period))
+ .filter(|(next, prev)| next == prev)
+ .count();
+ backtrace_stderr(&stack[..offset]);
+ written += offset;
+ consumed += offset;
+ if cycles > 1 {
+ raw_errln!("\n### cycle encountered after {offset} frames with period {period}");
+ backtrace_stderr(&stack[consumed..consumed + period]);
+ raw_errln!("### recursed {cycles} times\n");
+ written += period + 4;
+ consumed += period * cycles;
+ cyclic = true;
+ };
+ }
+ let rem = &stack[consumed..];
+ backtrace_stderr(rem);
+ raw_errln!("");
+ written += rem.len() + 1;
+
+ let random_depth = || 8 * 16; // chosen by random diceroll (2d20)
+ if cyclic || stack.len() > random_depth() {
+ // technically speculation, but assert it with confidence anyway.
+ // rustc only arrived in this signal handler because bad things happened
+ // and this message is for explaining it's not the programmer's fault
+ raw_errln!("note: rustc unexpectedly overflowed its stack! this is a bug");
+ written += 1;
+ }
+ if stack.len() == MAX_FRAMES {
+ raw_errln!("note: maximum backtrace depth reached, frames may have been lost");
+ written += 1;
+ }
+ raw_errln!("note: we would appreciate a report at https://github.com/rust-lang/rust");
+ written += 1;
+ if written > 24 {
+ // We probably just scrolled the earlier "we got SIGSEGV" message off the terminal
+ raw_errln!("note: backtrace dumped due to SIGSEGV! resuming signal");
+ };
+}
+
+/// When SIGSEGV is delivered to the process, print a stack trace and then exit.
+pub(super) fn install() {
+ unsafe {
+ let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
+ let mut alt_stack: libc::stack_t = mem::zeroed();
+ alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
+ alt_stack.ss_size = alt_stack_size;
+ libc::sigaltstack(&alt_stack, ptr::null_mut());
+
+ let mut sa: libc::sigaction = mem::zeroed();
+ sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
+ sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
+ libc::sigemptyset(&mut sa.sa_mask);
+ libc::sigaction(libc::SIGSEGV, &sa, ptr::null_mut());
+ }
+}
+
+/// Modern kernels on modern hardware can have dynamic signal stack sizes.
+#[cfg(any(target_os = "linux", target_os = "android"))]
+fn min_sigstack_size() -> usize {
+ const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
+ let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
+ // If getauxval couldn't find the entry, it returns 0,
+ // so take the higher of the "constant" and auxval.
+ // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
+ libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
+}
+
+/// Not all OS support hardware where this is needed.
+#[cfg(not(any(target_os = "linux", target_os = "android")))]
+fn min_sigstack_size() -> usize {
+ libc::MINSIGSTKSZ
+}
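
As a rough illustration of the cycle check performed by the new signal handler above: the runner/walker pairing is a Floyd-style probe over the captured frame addresses, and the period it finds need not be the minimal one, which the handler tolerates. A minimal standalone sketch (illustrative names only, operating on plain `usize` addresses rather than raw frame pointers):

```rust
/// Find a repeating suffix in a captured backtrace, modelled as plain addresses.
/// Returns (offset, period): where the repetition starts and the length of one repeat.
fn find_cycle(stack: &[usize]) -> Option<(usize, usize)> {
    // The "runner" visits indices 1, 3, 5, ... while the "walker" visits 0, 1, 2, ...;
    // the first match yields a period (possibly a multiple of the minimal period).
    let cycled = |(runner, walker): (&usize, &usize)| runner == walker;
    let period = stack.iter().skip(1).step_by(2).zip(stack).position(cycled)? + 1;
    // Knowing a period, the first index i with stack[i + period] == stack[i] is the
    // point where the repeating part of the trace begins.
    let offset = stack.iter().skip(period).zip(stack).position(cycled)?;
    Some((offset, period))
}

fn main() {
    // Frames 0xA, 0xB, then 0xC 0xD 0xE repeating: offset 2, period 3.
    let stack = [0xA, 0xB, 0xC, 0xD, 0xE, 0xC, 0xD, 0xE, 0xC, 0xD, 0xE];
    assert_eq!(find_cycle(&stack), Some((2, 3)));
}
```
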
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
index d104ff089..89c44c6ec 100644
--- a/compiler/rustc_error_codes/src/error_codes.rs
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -516,7 +516,8 @@ E0793: include_str!("./error_codes/E0793.md"),
E0794: include_str!("./error_codes/E0794.md"),
}
-// Undocumented removed error codes. Note that many removed error codes are documented.
+// Undocumented removed error codes. Note that many removed error codes are kept in the list above
+// and marked as no longer emitted, with a note in the markdown file (see E0001 for an example).
// E0006, // merged with E0005
// E0008, // cannot bind by-move into a pattern guard
// E0019, // merged into E0015
@@ -607,6 +608,7 @@ E0794: include_str!("./error_codes/E0794.md"),
// E0420, // merged into 532
// E0421, // merged into 531
// E0427, // merged into 530
+// E0445, // merged into 446 and type privacy lints
// E0456, // plugin `..` is not available for triple `..`
// E0465, // removed: merged with E0464
// E0467, // removed
diff --git a/compiler/rustc_error_codes/src/error_codes/E0038.md b/compiler/rustc_error_codes/src/error_codes/E0038.md
index 584b78554..8f8eabb15 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0038.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0038.md
@@ -162,14 +162,13 @@ fn foo<T>(x: T) {
```
The machine code for `foo::<u8>()`, `foo::<bool>()`, `foo::<String>()`, or any
-other type substitution is different. Hence the compiler generates the
+other instantiation is different. Hence the compiler generates the
implementation on-demand. If you call `foo()` with a `bool` parameter, the
compiler will only generate code for `foo::<bool>()`. When we have additional
type parameters, the number of monomorphized implementations the compiler
generates does not grow drastically, since the compiler will only generate an
-implementation if the function is called with unparameterized substitutions
-(i.e., substitutions where none of the substituted types are themselves
-parameterized).
+implementation if the function is called with fully concrete arguments
+(i.e., arguments which do not contain any generic parameters).
However, with trait objects we have to make a table containing _every_ object
that implements the trait. Now, if it has type parameters, we need to add
diff --git a/compiler/rustc_error_codes/src/error_codes/E0094.md b/compiler/rustc_error_codes/src/error_codes/E0094.md
index 67a8c3678..d8c1a3cb5 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0094.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0094.md
@@ -3,7 +3,7 @@ An invalid number of generic parameters was passed to an intrinsic function.
Erroneous code example:
```compile_fail,E0094
-#![feature(intrinsics)]
+#![feature(intrinsics, rustc_attrs)]
#![allow(internal_features)]
extern "rust-intrinsic" {
@@ -18,7 +18,7 @@ and verify with the function declaration in the Rust source code.
Example:
```
-#![feature(intrinsics)]
+#![feature(intrinsics, rustc_attrs)]
#![allow(internal_features)]
extern "rust-intrinsic" {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0191.md b/compiler/rustc_error_codes/src/error_codes/E0191.md
index 46b773bdc..344ac2216 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0191.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0191.md
@@ -7,8 +7,8 @@ trait Trait {
type Bar;
}
-type Foo = Trait; // error: the value of the associated type `Bar` (from
- // the trait `Trait`) must be specified
+type Foo = dyn Trait; // error: the value of the associated type `Bar` (from
+ // the trait `Trait`) must be specified
```
Trait objects need to have all associated types specified. Please verify that
@@ -20,5 +20,5 @@ trait Trait {
type Bar;
}
-type Foo = Trait<Bar=i32>; // ok!
+type Foo = dyn Trait<Bar=i32>; // ok!
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0211.md b/compiler/rustc_error_codes/src/error_codes/E0211.md
index 70f14fffa..19a482f6c 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0211.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0211.md
@@ -4,7 +4,7 @@ You used a function or type which doesn't fit the requirements for where it was
used. Erroneous code examples:
```compile_fail
-#![feature(intrinsics)]
+#![feature(intrinsics, rustc_attrs)]
#![allow(internal_features)]
extern "rust-intrinsic" {
@@ -41,7 +41,7 @@ impl Foo {
For the first code example, please check the function definition. Example:
```
-#![feature(intrinsics)]
+#![feature(intrinsics, rustc_attrs)]
#![allow(internal_features)]
extern "rust-intrinsic" {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0401.md b/compiler/rustc_error_codes/src/error_codes/E0401.md
index 4c93053d5..45d083681 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0401.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0401.md
@@ -1,4 +1,4 @@
-Inner items do not inherit type or const parameters from the functions
+Inner items do not inherit the generic parameters from the items
they are embedded in.
Erroneous code example:
@@ -32,8 +32,8 @@ fn foo<T>(x: T) {
}
```
-Items inside functions are basically just like top-level items, except
-that they can only be used from the function they are in.
+Items nested inside other items are basically just like top-level items, except
+that they can only be used from the item they are in.
There are a couple of solutions for this.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0445.md b/compiler/rustc_error_codes/src/error_codes/E0445.md
index e6a28a9c2..d47393194 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0445.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0445.md
@@ -1,10 +1,10 @@
-A private trait was used on a public type parameter bound.
+#### Note: this error code is no longer emitted by the compiler.
-Erroneous code examples:
+A private trait was used on a public type parameter bound.
-```compile_fail,E0445
-#![deny(private_in_public)]
+Previously erroneous code examples:
+```
trait Foo {
fn dummy(&self) { }
}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0446.md b/compiler/rustc_error_codes/src/error_codes/E0446.md
index 6ec47c496..ebbd83c68 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0446.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0446.md
@@ -1,16 +1,16 @@
-A private type was used in a public type signature.
+A private type or trait was used in a public associated type signature.
Erroneous code example:
```compile_fail,E0446
-#![deny(private_in_public)]
-struct Bar(u32);
-
-mod foo {
- use crate::Bar;
- pub fn bar() -> Bar { // error: private type in public interface
- Bar(0)
- }
+struct Bar;
+
+pub trait PubTr {
+ type Alias;
+}
+
+impl PubTr for u8 {
+ type Alias = Bar; // error: private type in public interface
}
fn main() {}
@@ -22,13 +22,14 @@ This is done by using pub(crate) or pub(in crate::my_mod::etc)
Example:
```
-struct Bar(u32);
+struct Bar;
+
+pub(crate) trait PubTr { // only public to crate root
+ type Alias;
+}
-mod foo {
- use crate::Bar;
- pub(crate) fn bar() -> Bar { // only public to crate root
- Bar(0)
- }
+impl PubTr for u8 {
+ type Alias = Bar;
}
fn main() {}
@@ -38,12 +39,15 @@ The other way to solve this error is to make the private type public.
Example:
```
-pub struct Bar(u32); // we set the Bar type public
-mod foo {
- use crate::Bar;
- pub fn bar() -> Bar { // ok!
- Bar(0)
- }
+
+pub struct Bar; // we set the Bar type public
+
+pub trait PubTr {
+ type Alias;
+}
+
+impl PubTr for u8 {
+ type Alias = Bar;
}
fn main() {}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0647.md b/compiler/rustc_error_codes/src/error_codes/E0647.md
index 8ca6e777f..59bb47ba6 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0647.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0647.md
@@ -7,7 +7,7 @@ Erroneous code example:
#[start]
fn start(_: isize, _: *const *const u8) -> isize where (): Copy {
- //^ error: start function is not allowed to have a where clause
+ //^ error: `#[start]` function is not allowed to have a where clause
0
}
```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0691.md b/compiler/rustc_error_codes/src/error_codes/E0691.md
index 483c74c0f..a5bedd61e 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0691.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0691.md
@@ -1,9 +1,11 @@
+#### Note: this error code is no longer emitted by the compiler.
+
A struct, enum, or union with the `repr(transparent)` representation hint
contains a zero-sized field that requires non-trivial alignment.
Erroneous code example:
-```compile_fail,E0691
+```ignore (error is no longer emitted)
#![feature(repr_align)]
#[repr(align(32))]
diff --git a/compiler/rustc_error_codes/src/error_codes/E0698.md b/compiler/rustc_error_codes/src/error_codes/E0698.md
index 3ba992a84..9bc652e64 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0698.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0698.md
@@ -1,9 +1,11 @@
+#### Note: this error code is no longer emitted by the compiler.
+
When using generators (or async) all type variables must be bound so a
generator can be constructed.
Erroneous code example:
-```edition2018,compile_fail,E0698
+```edition2018,compile_fail,E0282
async fn bar<T>() -> () {}
async fn foo() {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0760.md b/compiler/rustc_error_codes/src/error_codes/E0760.md
index 85e5faada..9c4739f0d 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0760.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0760.md
@@ -5,7 +5,7 @@ or `Self` that references lifetimes from a parent scope.
Erroneous code example:
-```compile_fail,edition2018
+```ignore,edition2018
struct S<'a>(&'a i32);
impl<'a> S<'a> {
diff --git a/compiler/rustc_error_codes/src/error_codes/E0788.md b/compiler/rustc_error_codes/src/error_codes/E0788.md
index d26f9b594..d655e51fa 100644
--- a/compiler/rustc_error_codes/src/error_codes/E0788.md
+++ b/compiler/rustc_error_codes/src/error_codes/E0788.md
@@ -1,4 +1,4 @@
-A `#[no_coverage]` attribute was applied to something which does not show up
+A `#[coverage]` attribute was applied to something which does not show up
in code coverage, or is too granular to be excluded from the coverage report.
For now, this attribute can only be applied to function, method, and closure
@@ -9,18 +9,18 @@ will just emit an `unused_attributes` lint instead of this error.
Example of erroneous code:
```compile_fail,E0788
-#[no_coverage]
+#[coverage(off)]
struct Foo;
-#[no_coverage]
+#[coverage(on)]
const FOO: Foo = Foo;
```
-`#[no_coverage]` tells the compiler to not generate coverage instrumentation for
-a piece of code when the `-C instrument-coverage` flag is passed. Things like
-structs and consts are not coverable code, and thus cannot do anything with this
-attribute.
+`#[coverage(off)]` tells the compiler to not generate coverage instrumentation
+for a piece of code when the `-C instrument-coverage` flag is passed. Things
+like structs and consts are not coverable code, and thus cannot do anything
+with this attribute.
If you wish to apply this attribute to all methods in an impl or module,
manually annotate each method; it is not possible to annotate the entire impl
-with a `#[no_coverage]` attribute.
+with a `#[coverage]` attribute.
diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs
index 3bf155050..6c2914456 100644
--- a/compiler/rustc_error_messages/src/lib.rs
+++ b/compiler/rustc_error_messages/src/lib.rs
@@ -4,7 +4,7 @@
#![feature(type_alias_impl_trait)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate tracing;
diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
index a88fba6da..203e52912 100644
--- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
+++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
@@ -91,7 +91,7 @@ fn annotation_type_for_level(level: Level) -> AnnotationType {
}
Level::Warning(_) => AnnotationType::Warning,
Level::Note | Level::OnceNote => AnnotationType::Note,
- Level::Help => AnnotationType::Help,
+ Level::Help | Level::OnceHelp => AnnotationType::Help,
// FIXME(#59346): Not sure how to map this level
Level::FailureNote => AnnotationType::Error,
Level::Allow => panic!("Should not call with Allow"),
@@ -169,7 +169,8 @@ impl AnnotateSnippetEmitterWriter {
.map(|line| {
// Ensure the source file is present before we try
// to load a string from it.
- source_map.ensure_source_file_source_present(file.clone());
+ // FIXME(#115869): support -Z ignore-directory-in-diagnostics-source-blocks
+ source_map.ensure_source_file_source_present(&file);
(
format!("{}", source_map.filename_for_diagnostics(&file.name)),
source_string(file.clone(), &line),
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
index a96e317df..470f318eb 100644
--- a/compiler/rustc_errors/src/diagnostic.rs
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -151,7 +151,12 @@ impl fmt::Display for DiagnosticLocation {
#[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
pub enum DiagnosticId {
Error(String),
- Lint { name: String, has_future_breakage: bool, is_force_warn: bool },
+ Lint {
+ name: String,
+ /// Indicates whether this lint should show up in cargo's future breakage report.
+ has_future_breakage: bool,
+ is_force_warn: bool,
+ },
}
/// A "sub"-diagnostic attached to a parent diagnostic.
@@ -270,6 +275,7 @@ impl Diagnostic {
| Level::Note
| Level::OnceNote
| Level::Help
+ | Level::OnceHelp
| Level::Allow
| Level::Expect(_) => false,
}
@@ -300,6 +306,7 @@ impl Diagnostic {
}
}
+ /// Indicates whether this diagnostic should show up in cargo's future breakage report.
pub fn has_future_breakage(&self) -> bool {
match self.code {
Some(DiagnosticId::Lint { has_future_breakage, .. }) => has_future_breakage,
@@ -532,6 +539,13 @@ impl Diagnostic {
self
}
+ /// Prints the span with a help above it.
+ /// This is like [`Diagnostic::help()`], but it gets its own span.
+ pub fn help_once(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.sub(Level::OnceHelp, msg, MultiSpan::new(), None);
+ self
+ }
+
/// Add a help message attached to this diagnostic with a customizable highlighted message.
pub fn highlighted_help(&mut self, msg: Vec<(String, Style)>) -> &mut Self {
self.sub_with_highlights(Level::Help, msg, MultiSpan::new(), None);
diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs
index a170e3a89..4f77f09b2 100644
--- a/compiler/rustc_errors/src/diagnostic_impls.rs
+++ b/compiler/rustc_errors/src/diagnostic_impls.rs
@@ -161,7 +161,7 @@ impl IntoDiagnosticArg for hir::ConstContext {
DiagnosticArgValue::Str(Cow::Borrowed(match self {
hir::ConstContext::ConstFn => "const_fn",
hir::ConstContext::Static(_) => "static",
- hir::ConstContext::Const => "const",
+ hir::ConstContext::Const { .. } => "const",
}))
}
}
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
index 0cae06881..d322cbe9d 100644
--- a/compiler/rustc_errors/src/emitter.rs
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -8,7 +8,7 @@
//! The output types are defined in `rustc_session::config::ErrorOutputType`.
use rustc_span::source_map::SourceMap;
-use rustc_span::{FileLines, SourceFile, Span};
+use rustc_span::{FileLines, FileName, SourceFile, Span};
use crate::snippet::{
Annotation, AnnotationColumn, AnnotationType, Line, MultilineAnnotation, Style, StyledString,
@@ -24,7 +24,7 @@ use rustc_lint_defs::pluralize;
use derive_setters::Setters;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{DynSend, IntoDynSyncSend, Lrc};
use rustc_error_messages::{FluentArgs, SpanLabel};
use rustc_span::hygiene::{ExpnKind, MacroKind};
use std::borrow::Cow;
@@ -188,6 +188,8 @@ impl Margin {
const ANONYMIZED_LINE_NUM: &str = "LL";
+pub type DynEmitter = dyn Emitter + DynSend;
+
/// Emitter trait for emitting errors.
pub trait Emitter: Translate {
/// Emit a structured diagnostic.
@@ -625,7 +627,7 @@ impl ColorConfig {
#[derive(Setters)]
pub struct EmitterWriter {
#[setters(skip)]
- dst: Destination,
+ dst: IntoDynSyncSend<Destination>,
sm: Option<Lrc<SourceMap>>,
fluent_bundle: Option<Lrc<FluentBundle>>,
#[setters(skip)]
@@ -633,6 +635,7 @@ pub struct EmitterWriter {
short_message: bool,
teach: bool,
ui_testing: bool,
+ ignored_directories_in_source_blocks: Vec<String>,
diagnostic_width: Option<usize>,
macro_backtrace: bool,
@@ -655,13 +658,14 @@ impl EmitterWriter {
fn create(dst: Destination, fallback_bundle: LazyFallbackBundle) -> EmitterWriter {
EmitterWriter {
- dst,
+ dst: IntoDynSyncSend(dst),
sm: None,
fluent_bundle: None,
fallback_bundle,
short_message: false,
teach: false,
ui_testing: false,
+ ignored_directories_in_source_blocks: Vec::new(),
diagnostic_width: None,
macro_backtrace: false,
track_diagnostics: false,
@@ -1191,7 +1195,7 @@ impl EmitterWriter {
let will_be_emitted = |span: Span| {
!span.is_dummy() && {
let file = sm.lookup_source_file(span.hi());
- sm.ensure_source_file_source_present(file)
+ should_show_source_code(&self.ignored_directories_in_source_blocks, sm, &file)
}
};
@@ -1386,7 +1390,11 @@ impl EmitterWriter {
// Print out the annotate source lines that correspond with the error
for annotated_file in annotated_files {
// we can't annotate anything if the source is unavailable.
- if !sm.ensure_source_file_source_present(annotated_file.file.clone()) {
+ if !should_show_source_code(
+ &self.ignored_directories_in_source_blocks,
+ sm,
+ &annotated_file.file,
+ ) {
if !self.short_message {
// We'll just print an unannotated message.
for (annotation_id, line) in annotated_file.lines.iter().enumerate() {
@@ -2346,7 +2354,13 @@ impl FileWithAnnotatedLines {
}
let label = label.as_ref().map(|m| {
- emitter.translate_message(m, args).map_err(Report::new).unwrap().to_string()
+ normalize_whitespace(
+ &emitter
+ .translate_message(m, &args)
+ .map_err(Report::new)
+ .unwrap()
+ .to_string(),
+ )
});
if lo.line != hi.line {
@@ -2729,3 +2743,18 @@ pub fn is_case_difference(sm: &SourceMap, suggested: &str, sp: Span) -> bool {
// bug, but be defensive against that here.
&& found != suggested
}
+
+pub(crate) fn should_show_source_code(
+ ignored_directories: &[String],
+ sm: &SourceMap,
+ file: &SourceFile,
+) -> bool {
+ if !sm.ensure_source_file_source_present(file) {
+ return false;
+ }
+
+ let FileName::Real(name) = &file.name else { return true };
+ name.local_path()
+ .map(|path| ignored_directories.iter().all(|dir| !path.starts_with(dir)))
+ .unwrap_or(true)
+}
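
The `should_show_source_code` helper added above combines the old availability check with the new `-Z ignore-directory-in-diagnostics-source-blocks` filter. A quick sketch of the path test it performs (hypothetical helper and paths, not rustc API; `Path::starts_with` matches whole components, so only files genuinely under an ignored directory are hidden):

```rust
use std::path::Path;

/// Sketch of the directory filter applied to real file names.
fn hidden_by_filter(ignored_directories: &[String], file: &Path) -> bool {
    // Inverse of `ignored_directories.iter().all(|dir| !path.starts_with(dir))`.
    ignored_directories.iter().any(|dir| file.starts_with(dir))
}

fn main() {
    let ignored = vec![String::from("/registry/cache")];
    assert!(hidden_by_filter(&ignored, Path::new("/registry/cache/serde/src/lib.rs")));
    assert!(!hidden_by_filter(&ignored, Path::new("/home/user/project/src/main.rs")));
}
```
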
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
index b8f58e305..0cb75c71b 100644
--- a/compiler/rustc_errors/src/json.rs
+++ b/compiler/rustc_errors/src/json.rs
@@ -12,7 +12,7 @@
use rustc_span::source_map::{FilePathMapping, SourceMap};
use termcolor::{ColorSpec, WriteColor};
-use crate::emitter::{Emitter, HumanReadableErrorType};
+use crate::emitter::{should_show_source_code, Emitter, HumanReadableErrorType};
use crate::registry::Registry;
use crate::translation::{to_fluent_args, Translate};
use crate::DiagnosticId;
@@ -22,7 +22,7 @@ use crate::{
};
use rustc_lint_defs::Applicability;
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{IntoDynSyncSend, Lrc};
use rustc_error_messages::FluentArgs;
use rustc_span::hygiene::ExpnData;
use rustc_span::Span;
@@ -38,13 +38,14 @@ use serde::Serialize;
mod tests;
pub struct JsonEmitter {
- dst: Box<dyn Write + Send>,
+ dst: IntoDynSyncSend<Box<dyn Write + Send>>,
registry: Option<Registry>,
sm: Lrc<SourceMap>,
fluent_bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: LazyFallbackBundle,
pretty: bool,
ui_testing: bool,
+ ignored_directories_in_source_blocks: Vec<String>,
json_rendered: HumanReadableErrorType,
diagnostic_width: Option<usize>,
macro_backtrace: bool,
@@ -66,13 +67,14 @@ impl JsonEmitter {
terminal_url: TerminalUrl,
) -> JsonEmitter {
JsonEmitter {
- dst: Box::new(io::BufWriter::new(io::stderr())),
+ dst: IntoDynSyncSend(Box::new(io::BufWriter::new(io::stderr()))),
registry,
sm: source_map,
fluent_bundle,
fallback_bundle,
pretty,
ui_testing: false,
+ ignored_directories_in_source_blocks: Vec::new(),
json_rendered,
diagnostic_width,
macro_backtrace,
@@ -120,13 +122,14 @@ impl JsonEmitter {
terminal_url: TerminalUrl,
) -> JsonEmitter {
JsonEmitter {
- dst,
+ dst: IntoDynSyncSend(dst),
registry,
sm: source_map,
fluent_bundle,
fallback_bundle,
pretty,
ui_testing: false,
+ ignored_directories_in_source_blocks: Vec::new(),
json_rendered,
diagnostic_width,
macro_backtrace,
@@ -138,6 +141,10 @@ impl JsonEmitter {
pub fn ui_testing(self, ui_testing: bool) -> Self {
Self { ui_testing, ..self }
}
+
+ pub fn ignored_directories_in_source_blocks(self, value: Vec<String>) -> Self {
+ Self { ignored_directories_in_source_blocks: value, ..self }
+ }
}
impl Translate for JsonEmitter {
@@ -381,6 +388,7 @@ impl Diagnostic {
.track_diagnostics(je.track_diagnostics)
.terminal_url(je.terminal_url)
.ui_testing(je.ui_testing)
+ .ignored_directories_in_source_blocks(je.ignored_directories_in_source_blocks.clone())
.emit_diagnostic(diag);
let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
let output = String::from_utf8(output).unwrap();
@@ -558,7 +566,11 @@ impl DiagnosticSpanLine {
.span_to_lines(span)
.map(|lines| {
// We can't get any lines if the source is unavailable.
- if !je.sm.ensure_source_file_source_present(lines.file.clone()) {
+ if !should_show_source_code(
+ &je.ignored_directories_in_source_blocks,
+ &je.sm,
+ &lines.file,
+ ) {
return vec![];
}
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index 34518b537..b747a62b8 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -15,7 +15,7 @@
#![feature(box_patterns)]
#![feature(error_reporter)]
#![allow(incomplete_features)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_macros;
@@ -30,11 +30,11 @@ pub use emitter::ColorConfig;
use rustc_lint_defs::LintExpectationId;
use Level::*;
-use emitter::{is_case_difference, Emitter, EmitterWriter};
+use emitter::{is_case_difference, DynEmitter, Emitter, EmitterWriter};
use registry::Registry;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::stable_hasher::{Hash128, StableHasher};
-use rustc_data_structures::sync::{self, IntoDynSyncSend, Lock, Lrc};
+use rustc_data_structures::sync::{Lock, Lrc};
use rustc_data_structures::AtomicRef;
pub use rustc_error_messages::{
fallback_fluent_bundle, fluent_bundle, DelayDm, DiagnosticMessage, FluentBundle,
@@ -44,7 +44,7 @@ use rustc_fluent_macro::fluent_messages;
pub use rustc_lint_defs::{pluralize, Applicability};
use rustc_span::source_map::SourceMap;
pub use rustc_span::ErrorGuaranteed;
-use rustc_span::{Loc, Span};
+use rustc_span::{Loc, Span, DUMMY_SP};
use std::borrow::Cow;
use std::error::Report;
@@ -55,7 +55,9 @@ use std::num::NonZeroUsize;
use std::panic;
use std::path::{Path, PathBuf};
-use termcolor::{Color, ColorSpec};
+// Used by external projects such as `rust-gpu`.
+// See https://github.com/rust-lang/rust/pull/115393.
+pub use termcolor::{Color, ColorSpec, WriteColor};
pub mod annotate_snippet_emitter_writer;
mod diagnostic;
@@ -197,8 +199,14 @@ impl CodeSuggestion {
use rustc_span::{CharPos, Pos};
- /// Append to a buffer the remainder of the line of existing source code, and return the
- /// count of lines that have been added for accurate highlighting.
+ /// Extracts a substring from the provided `line_opt` based on the specified low and high indices,
+ /// appends it to the given buffer `buf`, and returns the count of newline characters in the substring
+ /// for accurate highlighting.
+ /// If `line_opt` is `None`, nothing is appended and 0 is returned; a trailing newline is
+ /// appended whenever `hi_opt` is `None`.
+ ///
+ /// ## Returns
+ ///
+ /// The count of newline characters in the extracted substring.
fn push_trailing(
buf: &mut String,
line_opt: Option<&Cow<'_, str>>,
@@ -206,22 +214,30 @@ impl CodeSuggestion {
hi_opt: Option<&Loc>,
) -> usize {
let mut line_count = 0;
+ // Convert CharPos to usize, as CharPos is a character offset
+ // Extract low index and high index
let (lo, hi_opt) = (lo.col.to_usize(), hi_opt.map(|hi| hi.col.to_usize()));
if let Some(line) = line_opt {
if let Some(lo) = line.char_indices().map(|(i, _)| i).nth(lo) {
+ // Get the high byte index, accounting for multi-byte unicode and emoji via char_indices
let hi_opt = hi_opt.and_then(|hi| line.char_indices().map(|(i, _)| i).nth(hi));
match hi_opt {
+ // If the high index exists, take the string from the low to the high index
Some(hi) if hi > lo => {
+ // count how many '\n' exist
line_count = line[lo..hi].matches('\n').count();
buf.push_str(&line[lo..hi])
}
Some(_) => (),
+ // If the high index is absent, take the string from the low index to the end of the line
None => {
+ // count how many '\n' exist
line_count = line[lo..].matches('\n').count();
buf.push_str(&line[lo..])
}
}
}
+ // If high index is None
if hi_opt.is_none() {
buf.push('\n');
}
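
The indices discussed in the comments above are character positions, which `push_trailing` converts to byte offsets through `char_indices` before slicing. A minimal standalone sketch of that conversion (illustrative names, not the rustc function):

```rust
/// Take a substring of `line` by *character* positions, converting them to byte
/// offsets via `char_indices` so multi-byte characters are never split.
fn slice_by_chars(line: &str, lo: usize, hi: Option<usize>) -> &str {
    let byte_at = |n: usize| line.char_indices().map(|(i, _)| i).nth(n);
    match (byte_at(lo), hi.and_then(byte_at)) {
        (Some(lo), Some(hi)) if hi > lo => &line[lo..hi],
        // No usable high index: take everything from `lo` to the end of the line.
        (Some(lo), None) => &line[lo..],
        _ => "",
    }
}

fn main() {
    // 'é' is two bytes in UTF-8 but a single character.
    assert_eq!(slice_by_chars("héllo", 1, Some(4)), "éll");
    assert_eq!(slice_by_chars("héllo", 2, None), "llo");
}
```
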
@@ -257,7 +273,7 @@ impl CodeSuggestion {
assert!(!lines.lines.is_empty() || bounding_span.is_dummy());
// We can't splice anything if the source is unavailable.
- if !sm.ensure_source_file_source_present(lines.file.clone()) {
+ if !sm.ensure_source_file_source_present(&lines.file) {
return None;
}
@@ -414,7 +430,7 @@ struct HandlerInner {
err_count: usize,
warn_count: usize,
deduplicated_err_count: usize,
- emitter: IntoDynSyncSend<Box<dyn Emitter + sync::Send>>,
+ emitter: Box<DynEmitter>,
delayed_span_bugs: Vec<DelayedDiagnostic>,
delayed_good_path_bugs: Vec<DelayedDiagnostic>,
/// This flag indicates that an expected diagnostic was emitted and suppressed.
@@ -503,7 +519,7 @@ pub struct HandlerFlags {
/// If false, warning-level lints are suppressed.
/// (rustc: see `--allow warnings` and `--cap-lints`)
pub can_emit_warnings: bool,
- /// If true, error-level diagnostics are upgraded to bug-level.
+ /// If Some, the Nth error-level diagnostic is upgraded to bug-level.
/// (rustc: see `-Z treat-err-as-bug`)
pub treat_err_as_bug: Option<NonZeroUsize>,
/// If true, immediately emit diagnostics that would otherwise be buffered.
@@ -580,7 +596,7 @@ impl Handler {
self
}
- pub fn with_emitter(emitter: Box<dyn Emitter + sync::Send>) -> Self {
+ pub fn with_emitter(emitter: Box<DynEmitter>) -> Self {
Self {
inner: Lock::new(HandlerInner {
flags: HandlerFlags { can_emit_warnings: true, ..Default::default() },
@@ -589,7 +605,7 @@ impl Handler {
warn_count: 0,
deduplicated_err_count: 0,
deduplicated_warn_count: 0,
- emitter: IntoDynSyncSend(emitter),
+ emitter,
delayed_span_bugs: Vec::new(),
delayed_good_path_bugs: Vec::new(),
suppressed_expected_diag: false,
@@ -1374,7 +1390,7 @@ impl HandlerInner {
debug!(?self.emitted_diagnostics);
let already_emitted_sub = |sub: &mut SubDiagnostic| {
debug!(?sub);
- if sub.level != Level::OnceNote {
+ if sub.level != Level::OnceNote && sub.level != Level::OnceHelp {
return false;
}
let mut hasher = StableHasher::new();
@@ -1703,19 +1719,17 @@ impl HandlerInner {
match (
self.err_count() + self.lint_err_count,
self.delayed_bug_count(),
- self.flags.treat_err_as_bug.map(|c| c.get()).unwrap_or(0),
+ self.flags.treat_err_as_bug.map(|c| c.get()).unwrap(),
) {
(1, 0, 1) => panic!("aborting due to `-Z treat-err-as-bug=1`"),
(0, 1, 1) => panic!("aborting due delayed bug with `-Z treat-err-as-bug=1`"),
- (count, delayed_count, as_bug) => {
+ (count, delayed_count, val) => {
if delayed_count > 0 {
panic!(
- "aborting after {count} errors and {delayed_count} delayed bugs due to `-Z treat-err-as-bug={as_bug}`",
+ "aborting after {count} errors and {delayed_count} delayed bugs due to `-Z treat-err-as-bug={val}`",
)
} else {
- panic!(
- "aborting after {count} errors due to `-Z treat-err-as-bug={as_bug}`",
- )
+ panic!("aborting after {count} errors due to `-Z treat-err-as-bug={val}`")
}
}
}
@@ -1738,7 +1752,7 @@ impl DelayedDiagnostic {
BacktraceStatus::Captured => {
let inner = &self.inner;
self.inner.subdiagnostic(DelayedAtWithNewline {
- span: inner.span.primary_span().unwrap(),
+ span: inner.span.primary_span().unwrap_or(DUMMY_SP),
emitted_at: inner.emitted_at.clone(),
note: self.note,
});
@@ -1748,7 +1762,7 @@ impl DelayedDiagnostic {
_ => {
let inner = &self.inner;
self.inner.subdiagnostic(DelayedAtWithoutNewline {
- span: inner.span.primary_span().unwrap(),
+ span: inner.span.primary_span().unwrap_or(DUMMY_SP),
emitted_at: inner.emitted_at.clone(),
note: self.note,
});
@@ -1776,6 +1790,8 @@ pub enum Level {
/// A note that is only emitted once.
OnceNote,
Help,
+ /// A help that is only emitted once.
+ OnceHelp,
FailureNote,
Allow,
Expect(LintExpectationId),
@@ -1800,7 +1816,7 @@ impl Level {
Note | OnceNote => {
spec.set_fg(Some(Color::Green)).set_intense(true);
}
- Help => {
+ Help | OnceHelp => {
spec.set_fg(Some(Color::Cyan)).set_intense(true);
}
FailureNote => {}
@@ -1815,7 +1831,7 @@ impl Level {
Fatal | Error { .. } => "error",
Warning(_) => "warning",
Note | OnceNote => "note",
- Help => "help",
+ Help | OnceHelp => "help",
FailureNote => "failure-note",
Allow => panic!("Shouldn't call on allowed error"),
Expect(_) => panic!("Shouldn't call on expected error"),
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
index 34d16bf00..f87f4aba2 100644
--- a/compiler/rustc_expand/src/expand.rs
+++ b/compiler/rustc_expand/src/expand.rs
@@ -587,7 +587,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> {
.resolver
.visit_ast_fragment_with_placeholders(self.cx.current_expansion.id, &fragment);
- if self.cx.sess.opts.incremental_relative_spans() {
+ if self.cx.sess.opts.incremental.is_some() {
for (invoc, _) in invocations.iter_mut() {
let expn_id = invoc.expansion_data.id;
let parent_def = self.cx.resolver.invocation_parent(expn_id);
diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs
index c4a9b2ace..8b1fc5b90 100644
--- a/compiler/rustc_expand/src/lib.rs
+++ b/compiler/rustc_expand/src/lib.rs
@@ -11,7 +11,7 @@
#![feature(try_blocks)]
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index afcf30d0b..bcc64d48d 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -197,6 +197,8 @@ declare_features! (
/// + `impl<I:Iterator> Iterator for &mut Iterator`
/// + `impl Debug for Foo<'_>`
(accepted, impl_header_lifetime_elision, "1.31.0", Some(15872), None),
+ /// Allows referencing `Self` and projections in impl-trait.
+ (accepted, impl_trait_projections, "1.74.0", Some(103532), None),
/// Allows using `a..=b` and `..=b` as inclusive range syntaxes.
(accepted, inclusive_range_syntax, "1.26.0", Some(28237), None),
/// Allows inferring outlives requirements (RFC 2093).
@@ -267,6 +269,8 @@ declare_features! (
(accepted, non_modrs_mods, "1.30.0", Some(44660), None),
/// Allows the use of or-patterns (e.g., `0 | 1`).
(accepted, or_patterns, "1.53.0", Some(54883), None),
+ /// Allows using `+bundle,+whole-archive` link modifiers with native libs.
+ (accepted, packed_bundled_libs, "1.74.0", Some(108081), None),
/// Allows annotating functions conforming to `fn(&PanicInfo) -> !` with `#[panic_handler]`.
/// This defines the behavior of panics.
(accepted, panic_handler, "1.30.0", Some(44489), None),
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index f5bc140c0..783f39fdf 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -236,15 +236,15 @@ declare_features! (
/// Allows using the `#[fundamental]` attribute.
(active, fundamental, "1.0.0", Some(29635), None),
/// Allows using `#[link_name="llvm.*"]`.
- (active, link_llvm_intrinsics, "1.0.0", Some(29602), None),
+ (internal, link_llvm_intrinsics, "1.0.0", Some(29602), None),
/// Allows using the `#[linkage = ".."]` attribute.
(active, linkage, "1.0.0", Some(29603), None),
/// Allows declaring with `#![needs_panic_runtime]` that a panic runtime is needed.
(internal, needs_panic_runtime, "1.10.0", Some(32837), None),
- /// Allows using `+bundled,+whole-archive` native libs.
- (active, packed_bundled_libs, "1.69.0", Some(108081), None),
/// Allows using the `#![panic_runtime]` attribute.
(internal, panic_runtime, "1.10.0", Some(32837), None),
+ /// Allows `extern "platform-intrinsic" { ... }`.
+ (internal, platform_intrinsics, "1.4.0", Some(27731), None),
/// Allows using `#[rustc_allow_const_fn_unstable]`.
/// This is an attribute on `const fn` for the same
/// purpose as `#[allow_internal_unstable]`.
@@ -398,6 +398,11 @@ declare_features! (
(active, const_trait_impl, "1.42.0", Some(67792), None),
/// Allows the `?` operator in const contexts.
(active, const_try, "1.56.0", Some(74935), None),
+ /// Allows function attribute `#[coverage(on/off)]`, to control coverage
+ /// instrumentation of that function.
+ (active, coverage_attribute, "1.74.0", Some(84605), None),
+ /// Allows users to provide classes for fenced code block using `class:classname`.
+ (active, custom_code_classes_in_docs, "1.74.0", Some(79483), None),
/// Allows non-builtin attributes in inner attribute position.
(active, custom_inner_attributes, "1.30.0", Some(54726), None),
/// Allows custom test frameworks with `#![test_runner]` and `#[test_case]`.
@@ -411,7 +416,7 @@ declare_features! (
/// Allows having using `suggestion` in the `#[deprecated]` attribute.
(active, deprecated_suggestion, "1.61.0", Some(94785), None),
/// Allows using the `#[diagnostic]` attribute tool namespace
- (active, diagnostic_namespace, "1.73.0", Some(94785), None),
+ (active, diagnostic_namespace, "1.73.0", Some(111996), None),
/// Controls errors in trait implementations.
(active, do_not_recommend, "1.67.0", Some(51992), None),
/// Tells rustdoc to automatically generate `#[doc(cfg(...))]`.
@@ -465,8 +470,6 @@ declare_features! (
(active, impl_trait_in_assoc_type, "1.70.0", Some(63063), None),
/// Allows `impl Trait` as output type in `Fn` traits in return position of functions.
(active, impl_trait_in_fn_trait_return, "1.64.0", Some(99697), None),
- /// Allows referencing `Self` and projections in impl-trait.
- (active, impl_trait_projections, "1.67.0", Some(103532), None),
/// Allows using imported `main` function
(active, imported_main, "1.53.0", Some(28937), None),
/// Allows associated types in inherent impls.
@@ -509,9 +512,6 @@ declare_features! (
(active, never_type_fallback, "1.41.0", Some(65992), None),
/// Allows `#![no_core]`.
(active, no_core, "1.3.0", Some(29639), None),
- /// Allows function attribute `#[no_coverage]`, to bypass coverage
- /// instrumentation of that function.
- (active, no_coverage, "1.53.0", Some(84605), None),
/// Allows the use of `no_sanitize` attribute.
(active, no_sanitize, "1.42.0", Some(39699), None),
/// Allows using the `non_exhaustive_omitted_patterns` lint.
@@ -524,8 +524,6 @@ declare_features! (
(active, object_safe_for_dispatch, "1.40.0", Some(43561), None),
/// Allows using `#[optimize(X)]`.
(active, optimize_attribute, "1.34.0", Some(54882), None),
- /// Allows `extern "platform-intrinsic" { ... }`.
- (active, platform_intrinsics, "1.4.0", Some(27731), None),
/// Allows using `#![plugin(myplugin)]`.
(active, plugin, "1.0.0", Some(29597), None),
/// Allows exhaustive integer pattern matching on `usize` and `isize`.
@@ -584,6 +582,8 @@ declare_features! (
(active, type_privacy_lints, "1.72.0", Some(48054), None),
/// Enables rustc to generate code that instructs libstd to NOT ignore SIGPIPE.
(active, unix_sigpipe, "1.65.0", Some(97889), None),
+ /// Allows unnamed fields of struct and union type
+ (incomplete, unnamed_fields, "1.74.0", Some(49804), None),
/// Allows unsized fn parameters.
(active, unsized_fn_params, "1.49.0", Some(48055), None),
/// Allows unsized rvalues at arguments and parameters.
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
index 2f7cff3ce..18397af56 100644
--- a/compiler/rustc_feature/src/builtin_attrs.rs
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -395,7 +395,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
template!(List: "address, kcfi, memory, thread"), DuplicatesOk,
experimental!(no_sanitize)
),
- gated!(no_coverage, Normal, template!(Word), WarnFollowing, experimental!(no_coverage)),
+ gated!(coverage, Normal, template!(Word, List: "on|off"), WarnFollowing, coverage_attribute, experimental!(coverage)),
ungated!(
doc, Normal, template!(List: "hidden|inline|...", NameValueStr: "string"), DuplicatesOk
@@ -537,7 +537,6 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
allow_internal_unsafe, Normal, template!(Word), WarnFollowing,
"allow_internal_unsafe side-steps the unsafe_code lint",
),
- ungated!(rustc_safe_intrinsic, Normal, template!(Word), DuplicatesOk),
rustc_attr!(rustc_allowed_through_unstable_modules, Normal, template!(Word), WarnFollowing,
"rustc_allowed_through_unstable_modules special cases accidental stabilizations of stable items \
through unstable paths"),
@@ -700,6 +699,10 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
"#[rustc_pass_by_value] is used to mark types that must be passed by value instead of reference."
),
rustc_attr!(
+ rustc_never_returns_null_ptr, Normal, template!(Word), ErrorFollowing,
+ "#[rustc_never_returns_null_ptr] is used to mark functions returning non-null pointers."
+ ),
+ rustc_attr!(
rustc_coherence_is_core, AttributeType::CrateLevel, template!(Word), ErrorFollowing, @only_local: true,
"#![rustc_coherence_is_core] allows inherent methods on builtin types, only intended to be used in `core`."
),
@@ -802,6 +805,10 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
rustc_doc_primitive, Normal, template!(NameValueStr: "primitive name"), ErrorFollowing,
r#"`rustc_doc_primitive` is a rustc internal attribute"#,
),
+ rustc_attr!(
+ rustc_safe_intrinsic, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_safe_intrinsic]` attribute is used internally to mark intrinsics as safe"
+ ),
// ==========================================================================
// Internal attributes, Testing:
@@ -813,7 +820,9 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_strict_coherence, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_variance, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_variance_of_opaques, Normal, template!(Word), WarnFollowing),
rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ..."), WarnFollowing),
+ rustc_attr!(TEST, rustc_abi, Normal, template!(List: "field1, field2, ..."), WarnFollowing),
rustc_attr!(TEST, rustc_regions, Normal, template!(Word), WarnFollowing),
rustc_attr!(
TEST, rustc_error, Normal,
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index ed5d76b86..699d8a34e 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -136,6 +136,9 @@ declare_features! (
Some("subsumed by `#![feature(allocator_internals)]`")),
/// Allows use of unary negate on unsigned integers, e.g., -e for e: u8
(removed, negate_unsigned, "1.0.0", Some(29645), None, None),
+ /// Allows `#[no_coverage]` on functions.
+ /// The feature was renamed to `coverage_attribute` and the attribute to `#[coverage(on|off)]`
+ (removed, no_coverage, "1.74.0", Some(84605), None, Some("renamed to `coverage_attribute`")),
/// Allows `#[no_debug]`.
(removed, no_debug, "1.43.0", Some(29721), None, Some("removed due to lack of demand")),
/// Allows using `#[on_unimplemented(..)]` on traits.
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
index 642713096..3a4eb90f7 100644
--- a/compiler/rustc_hir/src/def.rs
+++ b/compiler/rustc_hir/src/def.rs
@@ -61,9 +61,7 @@ pub enum DefKind {
Variant,
Trait,
/// Type alias: `type Foo = Bar;`
- TyAlias {
- lazy: bool,
- },
+ TyAlias,
/// Type from an `extern` block.
ForeignTy,
/// Trait alias: `trait IntIterator = Iterator<Item = i32>;`
@@ -143,7 +141,7 @@ impl DefKind {
DefKind::Ctor(CtorOf::Struct, CtorKind::Fn) => "tuple struct",
DefKind::Ctor(CtorOf::Struct, CtorKind::Const) => "unit struct",
DefKind::OpaqueTy => "opaque type",
- DefKind::TyAlias { .. } => "type alias",
+ DefKind::TyAlias => "type alias",
DefKind::TraitAlias => "trait alias",
DefKind::AssocTy => "associated type",
DefKind::Union => "union",
@@ -199,7 +197,7 @@ impl DefKind {
| DefKind::Variant
| DefKind::Trait
| DefKind::OpaqueTy
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -250,7 +248,7 @@ impl DefKind {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs
index 66b153d89..168b336e3 100644
--- a/compiler/rustc_hir/src/definitions.rs
+++ b/compiler/rustc_hir/src/definitions.rs
@@ -278,7 +278,8 @@ pub enum DefPathData {
Ctor,
/// A constant expression (see `{ast,hir}::AnonConst`).
AnonConst,
- /// An `impl Trait` type node.
+ /// An existential `impl Trait` type node.
+ /// Argument-position `impl Trait` nodes have a `TypeNs` with their pretty-printed name.
ImplTrait,
/// `impl Trait` generated associated type node.
ImplTraitAssocTy,
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
index 0bfd62d68..3eec66611 100644
--- a/compiler/rustc_hir/src/hir.rs
+++ b/compiler/rustc_hir/src/hir.rs
@@ -19,6 +19,7 @@ use rustc_macros::HashStable_Generic;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::ErrorGuaranteed;
use rustc_span::{def_id::LocalDefId, BytePos, Span, DUMMY_SP};
use rustc_target::asm::InlineAsmRegOrRegClass;
use rustc_target::spec::abi::Abi;
@@ -1415,6 +1416,9 @@ pub struct Let<'hir> {
pub pat: &'hir Pat<'hir>,
pub ty: Option<&'hir Ty<'hir>>,
pub init: &'hir Expr<'hir>,
+ /// `Some` when this let expression is not in a syntactically valid location.
+ /// Used to prevent building MIR in such situations.
+ pub is_recovered: Option<ErrorGuaranteed>,
}
#[derive(Debug, Clone, Copy, HashStable_Generic)]
@@ -1577,8 +1581,8 @@ pub enum BodyOwnerKind {
/// Closures
Closure,
- /// Constants and associated constants.
- Const,
+ /// Constants and associated constants, also including inline constants.
+ Const { inline: bool },
/// Initializer of a `static` item.
Static(Mutability),
@@ -1588,7 +1592,7 @@ impl BodyOwnerKind {
pub fn is_fn_or_closure(self) -> bool {
match self {
BodyOwnerKind::Fn | BodyOwnerKind::Closure => true,
- BodyOwnerKind::Const | BodyOwnerKind::Static(_) => false,
+ BodyOwnerKind::Const { .. } | BodyOwnerKind::Static(_) => false,
}
}
}
@@ -1611,7 +1615,7 @@ pub enum ConstContext {
///
/// For the most part, other contexts are treated just like a regular `const`, so they are
/// lumped into the same category.
- Const,
+ Const { inline: bool },
}
impl ConstContext {
@@ -1620,7 +1624,7 @@ impl ConstContext {
/// E.g. `const` or `static mut`.
pub fn keyword_name(self) -> &'static str {
match self {
- Self::Const => "const",
+ Self::Const { .. } => "const",
Self::Static(Mutability::Not) => "static",
Self::Static(Mutability::Mut) => "static mut",
Self::ConstFn => "const fn",
@@ -1633,7 +1637,7 @@ impl ConstContext {
impl fmt::Display for ConstContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- Self::Const => write!(f, "constant"),
+ Self::Const { .. } => write!(f, "constant"),
Self::Static(_) => write!(f, "static"),
Self::ConstFn => write!(f, "constant function"),
}
@@ -2849,13 +2853,13 @@ impl ImplicitSelfKind {
#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug)]
#[derive(HashStable_Generic)]
pub enum IsAsync {
- Async,
+ Async(Span),
NotAsync,
}
impl IsAsync {
pub fn is_async(self) -> bool {
- self == IsAsync::Async
+ matches!(self, IsAsync::Async(_))
}
}
@@ -3292,7 +3296,7 @@ pub struct FnHeader {
impl FnHeader {
pub fn is_async(&self) -> bool {
- matches!(&self.asyncness, IsAsync::Async)
+ matches!(&self.asyncness, IsAsync::Async(_))
}
pub fn is_const(&self) -> bool {
@@ -3729,6 +3733,8 @@ impl<'hir> Node<'hir> {
Node::Lifetime(lt) => Some(lt.ident),
Node::GenericParam(p) => Some(p.name.ident()),
Node::TypeBinding(b) => Some(b.ident),
+ Node::PatField(f) => Some(f.ident),
+ Node::ExprField(f) => Some(f.ident),
Node::Param(..)
| Node::AnonConst(..)
| Node::ConstBlock(..)
@@ -3737,8 +3743,6 @@ impl<'hir> Node<'hir> {
| Node::Block(..)
| Node::Ctor(..)
| Node::Pat(..)
- | Node::PatField(..)
- | Node::ExprField(..)
| Node::Arm(..)
| Node::Local(..)
| Node::Crate(..)
@@ -4087,10 +4091,10 @@ mod size_asserts {
static_assert_size!(GenericBound<'_>, 48);
static_assert_size!(Generics<'_>, 56);
static_assert_size!(Impl<'_>, 80);
- static_assert_size!(ImplItem<'_>, 80);
- static_assert_size!(ImplItemKind<'_>, 32);
- static_assert_size!(Item<'_>, 80);
- static_assert_size!(ItemKind<'_>, 48);
+ static_assert_size!(ImplItem<'_>, 88);
+ static_assert_size!(ImplItemKind<'_>, 40);
+ static_assert_size!(Item<'_>, 88);
+ static_assert_size!(ItemKind<'_>, 56);
static_assert_size!(Local<'_>, 64);
static_assert_size!(Param<'_>, 32);
static_assert_size!(Pat<'_>, 72);
@@ -4101,8 +4105,8 @@ mod size_asserts {
static_assert_size!(Res, 12);
static_assert_size!(Stmt<'_>, 32);
static_assert_size!(StmtKind<'_>, 16);
- static_assert_size!(TraitItem<'_>, 80);
- static_assert_size!(TraitItemKind<'_>, 40);
+ static_assert_size!(TraitItem<'_>, 88);
+ static_assert_size!(TraitItemKind<'_>, 48);
static_assert_size!(Ty<'_>, 48);
static_assert_size!(TyKind<'_>, 32);
// tidy-alphabetical-end
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
index 172f557f8..d9195a374 100644
--- a/compiler/rustc_hir/src/intravisit.rs
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -152,7 +152,7 @@ pub mod nested_filter {
/// visit fn bodies for fns that it encounters, and closure bodies, but
/// skip over nested item-like things.
///
- /// See the comments on `ItemLikeVisitor` for more details on the overall
+ /// See the comments at [`rustc_hir::intravisit`] for more details on the overall
/// visit strategy.
pub trait NestedFilter<'hir> {
type Map: Map<'hir>;
@@ -229,8 +229,8 @@ pub trait Visitor<'v>: Sized {
/// `Self::NestedFilter` is `nested_filter::None`, this method does
/// nothing. **You probably don't want to override this method** --
/// instead, override [`Self::NestedFilter`] or use the "shallow" or
- /// "deep" visit patterns described on
- /// `itemlikevisit::ItemLikeVisitor`. The only reason to override
+ /// "deep" visit patterns described at
+ /// [`rustc_hir::intravisit`]. The only reason to override
/// this method is if you want a nested pattern but cannot supply a
/// [`Map`]; see `nested_visit_map` for advice.
fn visit_nested_item(&mut self, id: ItemId) {
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
index 302a94984..23b20543d 100644
--- a/compiler/rustc_hir/src/lang_items.rs
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -238,6 +238,7 @@ language_item_table! {
PanicLocation, sym::panic_location, panic_location, Target::Struct, GenericRequirement::None;
PanicImpl, sym::panic_impl, panic_impl, Target::Fn, GenericRequirement::None;
PanicCannotUnwind, sym::panic_cannot_unwind, panic_cannot_unwind, Target::Fn, GenericRequirement::Exact(0);
+ PanicInCleanup, sym::panic_in_cleanup, panic_in_cleanup, Target::Fn, GenericRequirement::Exact(0);
/// libstd panic entry point. Necessary for const eval to be able to catch it
BeginPanic, sym::begin_panic, begin_panic_fn, Target::Fn, GenericRequirement::None;
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
index 34214931a..094d5b1e7 100644
--- a/compiler/rustc_hir/src/lib.rs
+++ b/compiler/rustc_hir/src/lib.rs
@@ -13,7 +13,7 @@
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
index 644c4d826..0d65ddb56 100644
--- a/compiler/rustc_hir/src/target.rs
+++ b/compiler/rustc_hir/src/target.rs
@@ -101,7 +101,7 @@ impl Target {
DefKind::Mod => Target::Mod,
DefKind::ForeignMod => Target::ForeignMod,
DefKind::GlobalAsm => Target::GlobalAsm,
- DefKind::TyAlias { .. } => Target::TyAlias,
+ DefKind::TyAlias => Target::TyAlias,
DefKind::OpaqueTy => Target::OpaqueTy,
DefKind::Enum => Target::Enum,
DefKind::Struct => Target::Struct,
diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl
index 597cae6ff..1c926533a 100644
--- a/compiler/rustc_hir_analysis/messages.ftl
+++ b/compiler/rustc_hir_analysis/messages.ftl
@@ -21,12 +21,16 @@ hir_analysis_auto_deref_reached_recursion_limit = reached the recursion limit wh
.label = deref recursion limit reached
.help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`)
-hir_analysis_cannot_capture_late_bound_const_in_anon_const =
- cannot capture late-bound const parameter in a constant
+hir_analysis_cannot_capture_late_bound_const =
+ cannot capture late-bound const parameter in {$what}
.label = parameter defined here
-hir_analysis_cannot_capture_late_bound_ty_in_anon_const =
- cannot capture late-bound type parameter in a constant
+hir_analysis_cannot_capture_late_bound_lifetime =
+ cannot capture late-bound lifetime in {$what}
+ .label = lifetime defined here
+
+hir_analysis_cannot_capture_late_bound_ty =
+ cannot capture late-bound type parameter in {$what}
.label = parameter defined here
hir_analysis_cast_thin_pointer_to_fat_pointer = cannot cast thin pointer `{$expr_ty}` to fat pointer `{$cast_ty}`
@@ -34,6 +38,17 @@ hir_analysis_cast_thin_pointer_to_fat_pointer = cannot cast thin pointer `{$expr
hir_analysis_closure_implicit_hrtb = implicit types in closure signatures are forbidden when `for<...>` is present
.label = `for<...>` is here
+hir_analysis_coerce_unsized_may = the trait `{$trait_name}` may only be implemented for a coercion between structures
+
+hir_analysis_coerce_unsized_multi = implementing the trait `CoerceUnsized` requires multiple coercions
+ .note = `CoerceUnsized` may only be implemented for a coercion between structures with one field being coerced
+ .coercions_note = currently, {$number} fields need coercions: {$coercions}
+ .label = requires multiple coercions
+
+hir_analysis_coercion_between_struct_same_note = expected coercion between the same definition; expected `{$source_path}`, found `{$target_path}`
+
+hir_analysis_coercion_between_struct_single_note = expected a single field to be coerced, none found
+
hir_analysis_const_bound_for_non_const_trait =
~const can only be applied to `#[const_trait]` traits
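The `CoerceUnsized`/`DispatchFromDyn` wording above revolves around the "one coerced field" rule; a minimal sketch of a conforming impl, mirroring the standard library's own `CoerceUnsized` example (nightly features required):

```rust
#![feature(coerce_unsized, unsize)]
use std::marker::Unsize;
use std::ops::CoerceUnsized;

struct MyBox<T: ?Sized>(*const T);

// Exactly one field is coerced (`*const T` -> `*const U`), which is what the
// `hir_analysis_coerce_unsized_*` diagnostics above demand; a second coerced
// field would trip the "requires multiple coercions" error.
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyBox<U>> for MyBox<T> {}
```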
@@ -57,6 +72,15 @@ hir_analysis_copy_impl_on_type_with_dtor =
the trait `Copy` cannot be implemented for this type; the type has a destructor
.label = `Copy` not allowed on types with destructors
+hir_analysis_dispatch_from_dyn_multi = implementing the `DispatchFromDyn` trait requires multiple coercions
+ .note = the trait `DispatchFromDyn` may only be implemented for a coercion between structures with a single field being coerced
+ .coercions_note = currently, {$number} fields need coercions: {$coercions}
+
+hir_analysis_dispatch_from_dyn_repr = structs implementing `DispatchFromDyn` may not have `#[repr(packed)]` or `#[repr(C)]`
+
+hir_analysis_dispatch_from_dyn_zst = the trait `DispatchFromDyn` may only be implemented for structs containing the field being coerced, ZST fields with 1 byte alignment, and nothing else
+ .note = extra field `{$name}` of type `{$ty}` is not allowed
+
hir_analysis_drop_impl_negative = negative `Drop` impls are not supported
hir_analysis_drop_impl_on_wrong_item =
@@ -95,6 +119,34 @@ hir_analysis_impl_not_marked_default = `{$ident}` specializes an item from a par
hir_analysis_impl_not_marked_default_err = `{$ident}` specializes an item from a parent `impl`, but that item is not marked `default`
.note = parent implementation is in crate `{$cname}`
+hir_analysis_inherent_dyn = cannot define inherent `impl` for a dyn auto trait
+ .label = impl requires at least one non-auto trait
+ .note = define and implement a new trait or type instead
+
+hir_analysis_inherent_nominal = no nominal type found for inherent implementation
+ .label = impl requires a nominal type
+ .note = either implement a trait on it or create a newtype to wrap it instead
+hir_analysis_inherent_primitive_ty = cannot define inherent `impl` for primitive types
+ .help = consider using an extension trait instead
+
+hir_analysis_inherent_primitive_ty_note = you could also try moving the reference to uses of `{$subty}` (such as `self`) within the implementation
+
+hir_analysis_inherent_ty_outside = cannot define inherent `impl` for a type outside of the crate where the type is defined
+ .help = consider moving this inherent impl into the crate defining the type if possible
+ .span_help = alternatively add `#[rustc_has_incoherent_inherent_impls]` to the type and `#[rustc_allow_incoherent_impl]` to the relevant impl items
+
+hir_analysis_inherent_ty_outside_new = cannot define inherent `impl` for a type outside of the crate where the type is defined
+ .label = impl for type defined outside of crate.
+ .note = define and implement a trait or new type instead
+
+hir_analysis_inherent_ty_outside_primitive = cannot define inherent `impl` for primitive types outside of `core`
+ .help = consider moving this inherent impl into `core` if possible
+ .span_help = alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items
+
+hir_analysis_inherent_ty_outside_relevant = cannot define inherent `impl` for a type outside of the crate where the type is defined
+ .help = consider moving this inherent impl into the crate defining the type if possible
+ .span_help = alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items
+
hir_analysis_invalid_union_field =
field must implement `Copy` or be wrapped in `ManuallyDrop<...>` to be used in a union
.note = union fields must not have drop side-effects, which is currently enforced via either `Copy` or `ManuallyDrop<...>`
@@ -200,6 +252,8 @@ hir_analysis_pass_to_variadic_function = can't pass `{$ty}` to variadic function
hir_analysis_placeholder_not_allowed_item_signatures = the placeholder `_` is not allowed within types on item signatures for {$kind}
.label = not allowed in type signatures
+hir_analysis_requires_note = the `{$trait_name}` impl for `{$ty}` requires that `{$error_predicate}`
+
hir_analysis_return_type_notation_conflicting_bound =
ambiguous associated function `{$assoc_name}` for `{$ty_name}`
.note = `{$assoc_name}` is declared in two supertraits: `{$first_bound}` and `{$second_bound}`
@@ -222,6 +276,12 @@ hir_analysis_return_type_notation_on_non_rpitit =
.note = function returns `{$ty}`, which is not compatible with associated type return bounds
.label = this function must be `async` or return `impl Trait`
+hir_analysis_rpitit_refined = impl trait in impl method signature does not match trait method signature
+ .suggestion = replace the return type so that it matches the trait
+ .label = return type from trait method defined here
+ .unmatched_bound_label = this bound is stronger than that defined on the trait
+ .note = add `#[allow(refining_impl_trait)]` if it is intended for this to be part of the public API of this crate
+
hir_analysis_self_in_impl_self =
`Self` is not valid in the self type of an impl block
.note = replace `Self` with a different type
@@ -232,20 +292,20 @@ hir_analysis_simd_ffi_highly_experimental = use of SIMD type{$snip} in FFI is hi
hir_analysis_specialization_trait = implementing `rustc_specialization_trait` traits is unstable
.help = add `#![feature(min_specialization)]` to the crate attributes to enable
-hir_analysis_start_function_parameters = start function is not allowed to have type parameters
- .label = start function cannot have type parameters
+hir_analysis_start_function_parameters = `#[start]` function is not allowed to have type parameters
+ .label = `#[start]` function cannot have type parameters
-hir_analysis_start_function_where = start function is not allowed to have a `where` clause
- .label = start function cannot have a `where` clause
+hir_analysis_start_function_where = `#[start]` function is not allowed to have a `where` clause
+ .label = `#[start]` function cannot have a `where` clause
-hir_analysis_start_not_async = `start` is not allowed to be `async`
- .label = `start` is not allowed to be `async`
+hir_analysis_start_not_async = `#[start]` function is not allowed to be `async`
+ .label = `#[start]` is not allowed to be `async`
-hir_analysis_start_not_target_feature = `start` is not allowed to have `#[target_feature]`
- .label = `start` is not allowed to have `#[target_feature]`
+hir_analysis_start_not_target_feature = `#[start]` function is not allowed to have `#[target_feature]`
+ .label = `#[start]` function is not allowed to have `#[target_feature]`
-hir_analysis_start_not_track_caller = `start` is not allowed to be `#[track_caller]`
- .label = `start` is not allowed to be `#[track_caller]`
+hir_analysis_start_not_track_caller = `#[start]` function is not allowed to be `#[track_caller]`
+ .label = `#[start]` function is not allowed to be `#[track_caller]`
hir_analysis_static_specialize = cannot specialize on `'static` lifetime
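For orientation on the `#[start]` messages reworded above, a conforming `#[start]` function (nightly-only attribute; its fixed signature is the one `check_start_fn_ty` in `check/entry.rs` later in this patch compares against):

```rust
#![feature(start)]

// No type parameters, no `where` clause, not `async`, and none of
// `#[target_feature]` / `#[track_caller]` -- exactly the shape the reworded
// diagnostics above enforce.
#[start]
fn start(_argc: isize, _argv: *const *const u8) -> isize {
    0
}
```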
@@ -261,6 +321,9 @@ hir_analysis_too_large_static = extern static is too large for the current archi
hir_analysis_track_caller_on_main = `main` function is not allowed to be `#[track_caller]`
.suggestion = remove this annotation
+hir_analysis_trait_cannot_impl_for_ty = the trait `{$trait_name}` cannot be implemented for this type
+ .label = this field does not implement `{$trait_name}`
+
hir_analysis_trait_object_declared_with_no_traits =
at least one trait is required for an object type
.alias_span = this alias does not contain a trait
@@ -270,13 +333,13 @@ hir_analysis_transparent_enum_variant = transparent enum needs exactly one varia
.many_label = too many variants in `{$path}`
.multi_label = variant here
-hir_analysis_transparent_non_zero_sized = transparent {$desc} needs at most one non-zero-sized field, but has {$field_count}
- .label = needs at most one non-zero-sized field, but has {$field_count}
- .labels = this field is non-zero-sized
+hir_analysis_transparent_non_zero_sized = transparent {$desc} needs at most one field with non-trivial size or alignment, but has {$field_count}
+ .label = needs at most one field with non-trivial size or alignment, but has {$field_count}
+ .labels = this field has non-zero size or requires alignment
-hir_analysis_transparent_non_zero_sized_enum = the variant of a transparent {$desc} needs at most one non-zero-sized field, but has {$field_count}
- .label = needs at most one non-zero-sized field, but has {$field_count}
- .labels = this field is non-zero-sized
+hir_analysis_transparent_non_zero_sized_enum = the variant of a transparent {$desc} needs at most one field with non-trivial size or alignment, but has {$field_count}
+ .label = needs at most one field with non-trivial size or alignment, but has {$field_count}
+ .labels = this field has non-zero size or requires alignment
hir_analysis_typeof_reserved_keyword_used =
`typeof` is a reserved keyword but unimplemented
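The reworded `repr(transparent)` messages reflect the switch from "non-zero-sized" to the 1-ZST rule (at most one field with non-trivial size or alignment); a small, self-contained illustration:

```rust
use std::marker::PhantomData;

// Allowed: one field with non-trivial layout; `PhantomData<T>` is a 1-ZST
// (zero size, alignment 1) and therefore does not count against the limit.
#[repr(transparent)]
struct Wrapper<T> {
    value: T,
    _marker: PhantomData<T>,
}

// Rejected with the updated message, since both fields have non-trivial size:
// #[repr(transparent)]
// struct Bad(u32, u64);
```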
diff --git a/compiler/rustc_hir_analysis/src/astconv/bounds.rs b/compiler/rustc_hir_analysis/src/astconv/bounds.rs
index ba152cd48..21611e9c5 100644
--- a/compiler/rustc_hir_analysis/src/astconv/bounds.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/bounds.rs
@@ -427,7 +427,7 @@ impl<'tcx> dyn AstConv<'tcx> + '_ {
let bound_vars = tcx.late_bound_vars(binding.hir_id);
ty::Binder::bind_with_vars(subst_output, bound_vars)
} else {
- // Include substitutions for generic parameters of associated types
+ // Append the generic arguments of the associated type to the `trait_ref`.
candidate.map_bound(|trait_ref| {
let ident = Ident::new(assoc_item.name, binding.item_name.span);
let item_segment = hir::PathSegment {
diff --git a/compiler/rustc_hir_analysis/src/astconv/errors.rs b/compiler/rustc_hir_analysis/src/astconv/errors.rs
index bd311c98f..ed4dde419 100644
--- a/compiler/rustc_hir_analysis/src/astconv/errors.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/errors.rs
@@ -110,16 +110,22 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
{
// The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a
// valid span, so we point at the whole path segment instead.
- let span = if assoc_name.span != DUMMY_SP { assoc_name.span } else { span };
+ let is_dummy = assoc_name.span == DUMMY_SP;
+
let mut err = struct_span_err!(
self.tcx().sess,
- span,
+ if is_dummy { span } else { assoc_name.span },
E0220,
"associated type `{}` not found for `{}`",
assoc_name,
ty_param_name
);
+ if is_dummy {
+ err.span_label(span, format!("associated type `{assoc_name}` not found"));
+ return err.emit();
+ }
+
let all_candidate_names: Vec<_> = all_candidates()
.flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order())
.filter_map(|item| {
@@ -131,10 +137,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
})
.collect();
- if let (Some(suggested_name), true) = (
- find_best_match_for_name(&all_candidate_names, assoc_name.name, None),
- assoc_name.span != DUMMY_SP,
- ) {
+ if let Some(suggested_name) =
+ find_best_match_for_name(&all_candidate_names, assoc_name.name, None)
+ {
err.span_suggestion(
assoc_name.span,
"there is an associated type with a similar name",
@@ -172,10 +177,9 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
})
.collect();
- if let (Some(suggested_name), true) = (
- find_best_match_for_name(&wider_candidate_names, assoc_name.name, None),
- assoc_name.span != DUMMY_SP,
- ) {
+ if let Some(suggested_name) =
+ find_best_match_for_name(&wider_candidate_names, assoc_name.name, None)
+ {
if let [best_trait] = visible_traits
.iter()
.filter(|trait_def_id| {
@@ -197,7 +201,28 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
}
- err.span_label(span, format!("associated type `{assoc_name}` not found"));
+ // If we still couldn't find any associated type, and only one associated type exists,
+ // suggest using it.
+
+ if all_candidate_names.len() == 1 {
+ // this should still compile, except with `#![feature(associated_type_defaults)]`,
+ // where it could suggest `type A = Self::A`, thus recursing infinitely
+ let applicability = if self.tcx().features().associated_type_defaults {
+ Applicability::Unspecified
+ } else {
+ Applicability::MaybeIncorrect
+ };
+
+ err.span_suggestion(
+ assoc_name.span,
+ format!("`{ty_param_name}` has the following associated type"),
+ all_candidate_names.first().unwrap().to_string(),
+ applicability,
+ );
+ } else {
+ err.span_label(assoc_name.span, format!("associated type `{assoc_name}` not found"));
+ }
+
err.emit()
}
@@ -597,7 +622,21 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
}
}
- if !suggestions.is_empty() {
+ suggestions.sort_by_key(|&(span, _)| span);
+ // There are cases where one bound points to a span within another bound's span, like when
+ // you have code like the following (#115019), so we skip providing a suggestion in those
+ // cases to avoid having a malformed suggestion.
+ //
+ // pub struct Flatten<I> {
+ // inner: <IntoIterator<Item: IntoIterator<Item: >>::IntoIterator as Item>::core,
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ // | ^^^^^^^^^^^^^^^^^^^^^
+ // | |
+ // | associated types `Item`, `IntoIter` must be specified
+ // associated types `Item`, `IntoIter` must be specified
+ // }
+ let overlaps = suggestions.windows(2).any(|pair| pair[0].0.overlaps(pair[1].0));
+ if !suggestions.is_empty() && !overlaps {
err.multipart_suggestion(
format!("specify the associated type{}", pluralize!(types_count)),
suggestions,
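The E0220 path reworked above fires on code like the following hypothetical snippet; with the new logic, the label lands on the misspelled name and, when only one associated type exists, that type is suggested outright:

```rust
// error[E0220]: associated type `Itm` not found for `I`
// (the suggestion machinery proposes the close match `Item`)
fn first<I>(mut it: I) -> Option<I::Item>
where
    I: Iterator<Itm = u32>,
{
    it.next()
}
```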
diff --git a/compiler/rustc_hir_analysis/src/astconv/generics.rs b/compiler/rustc_hir_analysis/src/astconv/generics.rs
index 1372cc896..e3621ef93 100644
--- a/compiler/rustc_hir_analysis/src/astconv/generics.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/generics.rs
@@ -139,22 +139,22 @@ fn generic_arg_mismatch_err(
err.emit()
}
-/// Creates the relevant generic argument substitutions
+/// Creates the relevant generic arguments
/// corresponding to a set of generic parameters. This is a
/// rather complex function. Let us try to explain the role
/// of each of its parameters:
///
-/// To start, we are given the `def_id` of the thing we are
-/// creating the substitutions for, and a partial set of
-/// substitutions `parent_args`. In general, the substitutions
-/// for an item begin with substitutions for all the "parents" of
+/// To start, we are given the `def_id` of the thing whose generic
+/// parameters we are instantiating, and a partial set of
+/// arguments `parent_args`. In general, the generic arguments
+/// for an item begin with arguments for all the "parents" of
/// that item -- e.g., for a method it might include the
/// parameters from the impl.
///
/// Therefore, the method begins by walking down these parents,
/// starting with the outermost parent and proceed inwards until
/// it reaches `def_id`. For each parent `P`, it will check `parent_args`
-/// first to see if the parent's substitutions are listed in there. If so,
+/// first to see if the parent's arguments are listed in there. If so,
/// we can append those and move on. Otherwise, it invokes the
/// three callback functions:
///
@@ -188,7 +188,7 @@ pub fn create_args_for_parent_generic_args<'tcx, 'a>(
stack.push((def_id, parent_defs));
}
- // We manually build up the substitution, rather than using convenience
+ // We manually build up the generic arguments, rather than using convenience
// methods in `subst.rs`, so that we can iterate over the arguments and
// parameters in lock-step linearly, instead of trying to match each pair.
let mut args: SmallVec<[ty::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
@@ -196,7 +196,8 @@ pub fn create_args_for_parent_generic_args<'tcx, 'a>(
while let Some((def_id, defs)) = stack.pop() {
let mut params = defs.params.iter().peekable();
- // If we have already computed substitutions for parents, we can use those directly.
+ // If we have already computed the generic arguments for parents,
+ // we can use those directly.
while let Some(&param) = params.peek() {
if let Some(&kind) = parent_args.get(param.index as usize) {
args.push(kind);
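To make the "parents first" ordering described in the doc comment above concrete, a plain-Rust illustration (no compiler internals involved):

```rust
struct Foo<T>(T);

impl<T> Foo<T> {
    fn bar<U>(&self, _u: U) {}
}

fn main() {
    // For this call the full generic-argument list assembled by
    // `create_args_for_parent_generic_args` is conceptually [u8, bool]:
    // the impl's `T` (the parent) precedes the method's own `U`.
    Foo(1u8).bar(true);
}
```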
diff --git a/compiler/rustc_hir_analysis/src/astconv/mod.rs b/compiler/rustc_hir_analysis/src/astconv/mod.rs
index 668763f9b..56b1fd369 100644
--- a/compiler/rustc_hir_analysis/src/astconv/mod.rs
+++ b/compiler/rustc_hir_analysis/src/astconv/mod.rs
@@ -268,9 +268,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
// (*) -- not late-bound, won't change
}
- Some(rbv::ResolvedArg::Error(_)) => {
- bug!("only ty/ct should resolve as ResolvedArg::Error")
- }
+ Some(rbv::ResolvedArg::Error(guar)) => ty::Region::new_error(tcx, guar),
None => {
self.re_infer(def, lifetime.ident.span).unwrap_or_else(|| {
@@ -291,7 +289,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
- /// returns an appropriate set of substitutions for this particular reference to `I`.
+ /// returns an appropriate set of generic arguments for this particular reference to `I`.
pub fn ast_path_args_for_ty(
&self,
span: Span,
@@ -317,7 +315,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
/// Given the type/lifetime/const arguments provided to some path (along with
/// an implicit `Self`, if this is a trait reference), returns the complete
- /// set of substitutions. This may involve applying defaulted type parameters.
+ /// set of generic arguments. This may involve applying defaulted type parameters.
/// Constraints on associated types are created from `create_assoc_bindings_for_generic_args`.
///
/// Example:
@@ -523,7 +521,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
Ty::new_misc_error(tcx).into()
}
}
- GenericParamDefKind::Const { has_default } => {
+ GenericParamDefKind::Const { has_default, .. } => {
let ty = tcx
.at(self.span)
.type_of(param.def_id)
@@ -910,19 +908,17 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
) -> Ty<'tcx> {
let tcx = self.tcx();
let args = self.ast_path_args_for_ty(span, did, item_segment);
- let ty = tcx.at(span).type_of(did);
- if let DefKind::TyAlias { lazy } = tcx.def_kind(did)
- && (lazy || ty.skip_binder().has_opaque_types())
+ if let DefKind::TyAlias = tcx.def_kind(did)
+ && tcx.type_alias_is_lazy(did)
{
- // Type aliases referring to types that contain opaque types (but aren't just directly
- // referencing a single opaque type) as well as those defined in crates that have the
+ // Type aliases defined in crates that have the
// feature `lazy_type_alias` enabled get encoded as a type alias that normalization will
// then actually instantiate the where bounds of.
let alias_ty = tcx.mk_alias_ty(did, args);
Ty::new_alias(tcx, ty::Weak, alias_ty)
} else {
- ty.instantiate(tcx, args)
+ tcx.at(span).type_of(did).instantiate(tcx, args)
}
}
@@ -2161,7 +2157,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
Res::Def(
DefKind::Enum
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::Struct
| DefKind::Union
| DefKind::ForeignTy,
@@ -2200,27 +2196,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
err.span_note(span, format!("type parameter `{name}` defined here"));
}
});
-
- match tcx.named_bound_var(hir_id) {
- Some(rbv::ResolvedArg::LateBound(debruijn, index, _)) => {
- let name =
- tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id.expect_local()));
- let br = ty::BoundTy {
- var: ty::BoundVar::from_u32(index),
- kind: ty::BoundTyKind::Param(def_id, name),
- };
- Ty::new_bound(tcx, debruijn, br)
- }
- Some(rbv::ResolvedArg::EarlyBound(_)) => {
- let def_id = def_id.expect_local();
- let item_def_id = tcx.hir().ty_param_owner(def_id);
- let generics = tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&def_id.to_def_id()];
- Ty::new_param(tcx, index, tcx.hir().ty_param_name(def_id))
- }
- Some(rbv::ResolvedArg::Error(guar)) => Ty::new_error(tcx, guar),
- arg => bug!("unexpected bound var resolution for {hir_id:?}: {arg:?}"),
- }
+ self.hir_id_to_bound_ty(hir_id)
}
Res::SelfTyParam { .. } => {
// `Self` in trait or type alias.
@@ -2389,6 +2365,57 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
}
}
+ // Converts a hir id corresponding to a type parameter to
+ // an early-bound `ty::Param` or late-bound `ty::Bound`.
+ pub(crate) fn hir_id_to_bound_ty(&self, hir_id: hir::HirId) -> Ty<'tcx> {
+ let tcx = self.tcx();
+ match tcx.named_bound_var(hir_id) {
+ Some(rbv::ResolvedArg::LateBound(debruijn, index, def_id)) => {
+ let name = tcx.item_name(def_id);
+ let br = ty::BoundTy {
+ var: ty::BoundVar::from_u32(index),
+ kind: ty::BoundTyKind::Param(def_id, name),
+ };
+ Ty::new_bound(tcx, debruijn, br)
+ }
+ Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
+ let def_id = def_id.expect_local();
+ let item_def_id = tcx.hir().ty_param_owner(def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+ Ty::new_param(tcx, index, tcx.hir().ty_param_name(def_id))
+ }
+ Some(rbv::ResolvedArg::Error(guar)) => Ty::new_error(tcx, guar),
+ arg => bug!("unexpected bound var resolution for {hir_id:?}: {arg:?}"),
+ }
+ }
+
+ // Converts a hir id corresponding to a const parameter to
+ // an early-bound `ConstKind::Param` or late-bound `ConstKind::Bound`.
+ pub(crate) fn hir_id_to_bound_const(
+ &self,
+ hir_id: hir::HirId,
+ param_ty: Ty<'tcx>,
+ ) -> Const<'tcx> {
+ let tcx = self.tcx();
+ match tcx.named_bound_var(hir_id) {
+ Some(rbv::ResolvedArg::EarlyBound(def_id)) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let item_def_id = tcx.parent(def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.item_name(def_id);
+ ty::Const::new_param(tcx, ty::ParamConst::new(index, name), param_ty)
+ }
+ Some(rbv::ResolvedArg::LateBound(debruijn, index, _)) => {
+ ty::Const::new_bound(tcx, debruijn, ty::BoundVar::from_u32(index), param_ty)
+ }
+ Some(rbv::ResolvedArg::Error(guar)) => ty::Const::new_error(tcx, guar, param_ty),
+ arg => bug!("unexpected bound var resolution for {:?}: {arg:?}", hir_id),
+ }
+ }
+
/// Parses the programmer's textual representation of a type into our
/// internal notion of a type.
pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
@@ -2747,7 +2774,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
) {
for br in referenced_regions.difference(&constrained_regions) {
let br_name = match *br {
- ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(..) | ty::BrEnv => {
+ ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon | ty::BrEnv => {
"an anonymous lifetime".to_string()
}
ty::BrNamed(_, name) => format!("lifetime `{name}`"),
@@ -2755,7 +2782,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
let mut err = generate_err(&br_name);
- if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(..) = *br {
+ if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon = *br {
// The only way for an anonymous lifetime to wind up
// in the return type but **also** be unconstrained is
// if it only appears in "associated types" in the
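The `BrAnon`/`BrNamed` branch above belongs to the check that every late-bound lifetime in a return type is constrained by the inputs; a typical trigger and a fixed variant, for illustration:

```rust
// error[E0581]: the return type references `'a`, which does not appear in
// the fn input types
// type Bad = for<'a> fn() -> &'a i32;

// Fine: `'a` also appears in an input, so it is constrained.
type Fine = for<'a> fn(&'a i32) -> &'a i32;
```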
diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs
index 2c7788498..44e1bdb83 100644
--- a/compiler/rustc_hir_analysis/src/check/check.rs
+++ b/compiler/rustc_hir_analysis/src/check/check.rs
@@ -5,20 +5,17 @@ use super::compare_impl_item::check_type_bounds;
use super::compare_impl_item::{compare_impl_method, compare_impl_ty};
use super::*;
use rustc_attr as attr;
-use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_errors::{ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def::{CtorKind, DefKind};
use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId};
-use rustc_hir::intravisit::Visitor;
-use rustc_hir::{ItemKind, Node, PathSegment};
-use rustc_infer::infer::opaque_types::ConstrainOpaqueTypeRegionVisitor;
+use rustc_hir::Node;
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
use rustc_infer::infer::{RegionVariableOrigin, TyCtxtInferExt};
use rustc_infer::traits::{Obligation, TraitEngineExt as _};
use rustc_lint_defs::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
-use rustc_middle::hir::nested_filter;
use rustc_middle::middle::stability::EvalResult;
-use rustc_middle::traits::DefiningAnchor;
+use rustc_middle::traits::{DefiningAnchor, ObligationCauseCode};
use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
use rustc_middle::ty::util::{Discr, IntTypeExt};
@@ -218,9 +215,6 @@ fn check_opaque(tcx: TyCtxt<'_>, id: hir::ItemId) {
let args = GenericArgs::identity_for_item(tcx, item.owner_id);
let span = tcx.def_span(item.owner_id.def_id);
- if !tcx.features().impl_trait_projections {
- check_opaque_for_inheriting_lifetimes(tcx, item.owner_id.def_id, span);
- }
if tcx.type_of(item.owner_id.def_id).instantiate_identity().references_error() {
return;
}
@@ -231,129 +225,6 @@ fn check_opaque(tcx: TyCtxt<'_>, id: hir::ItemId) {
let _ = check_opaque_meets_bounds(tcx, item.owner_id.def_id, span, &origin);
}
-/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
-/// in "inheriting lifetimes".
-#[instrument(level = "debug", skip(tcx, span))]
-pub(super) fn check_opaque_for_inheriting_lifetimes(
- tcx: TyCtxt<'_>,
- def_id: LocalDefId,
- span: Span,
-) {
- let item = tcx.hir().expect_item(def_id);
- debug!(?item, ?span);
-
- struct ProhibitOpaqueVisitor<'tcx> {
- tcx: TyCtxt<'tcx>,
- opaque_identity_ty: Ty<'tcx>,
- parent_count: u32,
- references_parent_regions: bool,
- selftys: Vec<(Span, Option<String>)>,
- }
-
- impl<'tcx> ty::visit::TypeVisitor<TyCtxt<'tcx>> for ProhibitOpaqueVisitor<'tcx> {
- type BreakTy = Ty<'tcx>;
-
- fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- debug!(?t, "root_visit_ty");
- if t == self.opaque_identity_ty {
- ControlFlow::Continue(())
- } else {
- t.visit_with(&mut ConstrainOpaqueTypeRegionVisitor {
- tcx: self.tcx,
- op: |region| {
- if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *region
- && index < self.parent_count
- {
- self.references_parent_regions= true;
- }
- },
- });
- if self.references_parent_regions {
- ControlFlow::Break(t)
- } else {
- ControlFlow::Continue(())
- }
- }
- }
- }
-
- impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
- type NestedFilter = nested_filter::OnlyBodies;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
- match arg.kind {
- hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
- [PathSegment { res: Res::SelfTyParam { .. }, .. }] => {
- let impl_ty_name = None;
- self.selftys.push((path.span, impl_ty_name));
- }
- [PathSegment { res: Res::SelfTyAlias { alias_to: def_id, .. }, .. }] => {
- let impl_ty_name = Some(self.tcx.def_path_str(*def_id));
- self.selftys.push((path.span, impl_ty_name));
- }
- _ => {}
- },
- _ => {}
- }
- hir::intravisit::walk_ty(self, arg);
- }
- }
-
- if let ItemKind::OpaqueTy(&hir::OpaqueTy {
- origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
- ..
- }) = item.kind
- {
- let args = GenericArgs::identity_for_item(tcx, def_id);
- let opaque_identity_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args);
- let mut visitor = ProhibitOpaqueVisitor {
- opaque_identity_ty,
- parent_count: tcx.generics_of(def_id).parent_count as u32,
- references_parent_regions: false,
- tcx,
- selftys: vec![],
- };
- let prohibit_opaque = tcx
- .explicit_item_bounds(def_id)
- .instantiate_identity_iter_copied()
- .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
-
- if let Some(ty) = prohibit_opaque.break_value() {
- visitor.visit_item(&item);
- let is_async = match item.kind {
- ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
- matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..))
- }
- _ => unreachable!(),
- };
-
- let mut err = feature_err(
- &tcx.sess.parse_sess,
- sym::impl_trait_projections,
- span,
- format!(
- "`{}` return type cannot contain a projection or `Self` that references \
- lifetimes from a parent scope",
- if is_async { "async fn" } else { "impl Trait" },
- ),
- );
- for (span, name) in visitor.selftys {
- err.span_suggestion(
- span,
- "consider spelling out the type instead",
- name.unwrap_or_else(|| format!("{ty:?}")),
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- }
- }
-}
-
/// Checks that an opaque type does not contain cycles.
pub(super) fn check_opaque_for_cycles<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -640,7 +511,7 @@ fn check_item_type(tcx: TyCtxt<'_>, id: hir::ItemId) {
check_opaque(tcx, id);
}
}
- DefKind::TyAlias { .. } => {
+ DefKind::TyAlias => {
let pty_ty = tcx.type_of(id.owner_id).instantiate_identity();
let generics = tcx.generics_of(id.owner_id);
check_type_params_are_used(tcx, &generics, pty_ty);
@@ -831,7 +702,7 @@ fn check_impl_items_against_trait<'tcx>(
};
match ty_impl_item.kind {
ty::AssocKind::Const => {
- let _ = tcx.compare_impl_const((
+ tcx.ensure().compare_impl_const((
impl_item.expect_local(),
ty_impl_item.trait_item_def_id.unwrap(),
));
@@ -1138,19 +1009,19 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
return;
}
- // For each field, figure out if it's known to be a ZST and align(1), with "known"
- // respecting #[non_exhaustive] attributes.
+ // For each field, figure out if it's known to have "trivial" layout (i.e., is a 1-ZST), with
+ // "known" respecting #[non_exhaustive] attributes.
let field_infos = adt.all_fields().map(|field| {
let ty = field.ty(tcx, GenericArgs::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
let layout = tcx.layout_of(param_env.and(ty));
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir().span_if_local(field.did).unwrap();
- let zst = layout.is_ok_and(|layout| layout.is_zst());
- let align = layout.ok().map(|layout| layout.align.abi.bytes());
- if !zst {
- return (span, zst, align, None);
+ let trivial = layout.is_ok_and(|layout| layout.is_1zst());
+ if !trivial {
+ return (span, trivial, None);
}
+ // Even some 1-ZST fields are not allowed though, if they have `non_exhaustive`.
fn check_non_exhaustive<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -1184,58 +1055,52 @@ pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>)
}
}
- (span, zst, align, check_non_exhaustive(tcx, ty).break_value())
+ (span, trivial, check_non_exhaustive(tcx, ty).break_value())
});
- let non_zst_fields = field_infos
+ let non_trivial_fields = field_infos
.clone()
- .filter_map(|(span, zst, _align, _non_exhaustive)| if !zst { Some(span) } else { None });
- let non_zst_count = non_zst_fields.clone().count();
- if non_zst_count >= 2 {
- bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, tcx.def_span(adt.did()));
+ .filter_map(|(span, trivial, _non_exhaustive)| if !trivial { Some(span) } else { None });
+ let non_trivial_count = non_trivial_fields.clone().count();
+ if non_trivial_count >= 2 {
+ bad_non_zero_sized_fields(
+ tcx,
+ adt,
+ non_trivial_count,
+ non_trivial_fields,
+ tcx.def_span(adt.did()),
+ );
+ return;
}
- let incompatible_zst_fields =
- field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
- let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
- for (span, zst, align, non_exhaustive) in field_infos {
- if zst && align != Some(1) {
- let mut err = struct_span_err!(
- tcx.sess,
- span,
- E0691,
- "zero-sized field in transparent {} has alignment larger than 1",
- adt.descr(),
- );
-
- if let Some(align_bytes) = align {
- err.span_label(
+ let mut prev_non_exhaustive_1zst = false;
+ for (span, _trivial, non_exhaustive_1zst) in field_infos {
+ if let Some((descr, def_id, args, non_exhaustive)) = non_exhaustive_1zst {
+ // If there are any non-trivial fields, then there can be no non-exhaustive 1-zsts.
+ // Otherwise, it's only an issue if there's >1 non-exhaustive 1-zst.
+ if non_trivial_count > 0 || prev_non_exhaustive_1zst {
+ tcx.struct_span_lint_hir(
+ REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
span,
- format!("has alignment of {align_bytes}, which is larger than 1"),
- );
+ "zero-sized fields in `repr(transparent)` cannot \
+ contain external non-exhaustive types",
+ |lint| {
+ let note = if non_exhaustive {
+ "is marked with `#[non_exhaustive]`"
+ } else {
+ "contains private fields"
+ };
+ let field_ty = tcx.def_path_str_with_args(def_id, args);
+ lint.note(format!(
+ "this {descr} contains `{field_ty}`, which {note}, \
+ and makes it not a breaking change to become \
+ non-zero-sized in the future."
+ ))
+ },
+ )
} else {
- err.span_label(span, "may have alignment larger than 1");
+ prev_non_exhaustive_1zst = true;
}
-
- err.emit();
- }
- if incompat && let Some((descr, def_id, args, non_exhaustive)) = non_exhaustive {
- tcx.struct_span_lint_hir(
- REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
- tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
- span,
- "zero-sized fields in `repr(transparent)` cannot contain external non-exhaustive types",
- |lint| {
- let note = if non_exhaustive {
- "is marked with `#[non_exhaustive]`"
- } else {
- "contains private fields"
- };
- let field_ty = tcx.def_path_str_with_args(def_id, args);
- lint
- .note(format!("this {descr} contains `{field_ty}`, which {note}, \
- and makes it not a breaking change to become non-zero-sized in the future."))
- },
- )
}
}
}
@@ -1585,13 +1450,7 @@ fn opaque_type_cycle_error(
label_match(capture.place.ty(), capture.get_path_span(tcx));
}
// Label any generator locals that capture the opaque
- for interior_ty in
- typeck_results.generator_interior_types.as_ref().skip_binder()
- {
- label_match(interior_ty.ty, interior_ty.span);
- }
- if tcx.sess.opts.unstable_opts.drop_tracking_mir
- && let DefKind::Generator = tcx.def_kind(closure_def_id)
+ if let DefKind::Generator = tcx.def_kind(closure_def_id)
&& let Some(generator_layout) = tcx.mir_generator_witnesses(closure_def_id)
{
for interior_ty in &generator_layout.field_tys {
@@ -1609,7 +1468,6 @@ fn opaque_type_cycle_error(
}
pub(super) fn check_generator_obligations(tcx: TyCtxt<'_>, def_id: LocalDefId) {
- debug_assert!(tcx.sess.opts.unstable_opts.drop_tracking_mir);
debug_assert!(matches!(tcx.def_kind(def_id), DefKind::Generator));
let typeck = tcx.typeck(def_id);
@@ -1632,6 +1490,25 @@ pub(super) fn check_generator_obligations(tcx: TyCtxt<'_>, def_id: LocalDefId) {
let obligation = Obligation::new(tcx, cause.clone(), param_env, *predicate);
fulfillment_cx.register_predicate_obligation(&infcx, obligation);
}
+
+ if (tcx.features().unsized_locals || tcx.features().unsized_fn_params)
+ && let Some(generator) = tcx.mir_generator_witnesses(def_id)
+ {
+ for field_ty in generator.field_tys.iter() {
+ fulfillment_cx.register_bound(
+ &infcx,
+ param_env,
+ field_ty.ty,
+ tcx.require_lang_item(hir::LangItem::Sized, Some(field_ty.source_info.span)),
+ ObligationCause::new(
+ field_ty.source_info.span,
+ def_id,
+ ObligationCauseCode::SizedGeneratorInterior(def_id),
+ ),
+ );
+ }
+ }
+
let errors = fulfillment_cx.select_all_or_error(&infcx);
debug!(?errors);
if !errors.is_empty() {
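The removal of `check_opaque_for_inheriting_lifetimes` above drops the old `impl_trait_projections` gate on opaque returns that mention `Self` or projections tied to a parent lifetime; as a sketch of the kind of code that gate used to reject:

```rust
struct Wrapper<'a>(&'a str);

impl<'a> Wrapper<'a> {
    // Previously: "`async fn` return type cannot contain a projection or
    // `Self` that references lifetimes from a parent scope" without the
    // feature; accepted after this change.
    async fn into_self(self) -> Self {
        self
    }
}
```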
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
index bd0ab6463..d081b0e35 100644
--- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs
@@ -14,11 +14,12 @@ use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKi
use rustc_infer::infer::{self, InferCtxt, TyCtxtInferExt};
use rustc_infer::traits::util;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::fold::BottomUpFolder;
use rustc_middle::ty::util::ExplicitSelf;
use rustc_middle::ty::{
self, GenericArgs, Ty, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt,
};
-use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
+use rustc_middle::ty::{GenericParamDefKind, TyCtxt};
use rustc_span::{Span, DUMMY_SP};
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
@@ -28,6 +29,8 @@ use rustc_trait_selection::traits::{
use std::borrow::Cow;
use std::iter;
+mod refine;
+
/// Checks that a method from an impl conforms to the signature of
/// the same method as declared in the trait.
///
@@ -53,6 +56,12 @@ pub(super) fn compare_impl_method<'tcx>(
impl_trait_ref,
CheckImpliedWfMode::Check,
)?;
+ refine::check_refining_return_position_impl_trait_in_trait(
+ tcx,
+ impl_m,
+ trait_m,
+ impl_trait_ref,
+ );
};
}
@@ -587,7 +596,7 @@ fn compare_asyncness<'tcx>(
trait_m: ty::AssocItem,
delay: bool,
) -> Result<(), ErrorGuaranteed> {
- if tcx.asyncness(trait_m.def_id) == hir::IsAsync::Async {
+ if tcx.asyncness(trait_m.def_id).is_async() {
match tcx.fn_sig(impl_m.def_id).skip_binder().skip_binder().output().kind() {
ty::Alias(ty::Opaque, ..) => {
// allow both `async fn foo()` and `fn foo() -> impl Future`
@@ -653,8 +662,6 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
let trait_m = tcx.opt_associated_item(impl_m.trait_item_def_id.unwrap()).unwrap();
let impl_trait_ref =
tcx.impl_trait_ref(impl_m.impl_container(tcx).unwrap()).unwrap().instantiate_identity();
- let param_env = tcx.param_env(impl_m_def_id);
-
// First, check a few of the same things as `compare_impl_method`,
// just so we don't ICE during substitution later.
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, true)?;
@@ -680,13 +687,26 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
let trait_to_placeholder_args =
impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
+ let hybrid_preds = tcx
+ .predicates_of(impl_m.container_id(tcx))
+ .instantiate_identity(tcx)
+ .into_iter()
+ .chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_to_placeholder_args))
+ .map(|(clause, _)| clause);
+ let param_env = ty::ParamEnv::new(tcx.mk_clauses_from_iter(hybrid_preds), Reveal::UserFacing);
+ let param_env = traits::normalize_param_env_or_error(
+ tcx,
+ param_env,
+ ObligationCause::misc(tcx.def_span(impl_m_def_id), impl_m_def_id),
+ );
+
let infcx = &tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new(infcx);
// Normalize the impl signature with fresh variables for lifetime inference.
- let norm_cause = ObligationCause::misc(return_span, impl_m_def_id);
+ let misc_cause = ObligationCause::misc(return_span, impl_m_def_id);
let impl_sig = ocx.normalize(
- &norm_cause,
+ &misc_cause,
param_env,
tcx.liberate_late_bound_regions(
impl_m.def_id,
@@ -717,12 +737,68 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
);
}
- let trait_sig = ocx.normalize(&norm_cause, param_env, unnormalized_trait_sig);
+ let trait_sig = ocx.normalize(&misc_cause, param_env, unnormalized_trait_sig);
trait_sig.error_reported()?;
let trait_return_ty = trait_sig.output();
+ // RPITITs are allowed to use the implied predicates of the method that
+ // defines them. This is because we want code like:
+ // ```
+ // trait Foo {
+ // fn test<'a, T>(_: &'a T) -> impl Sized;
+ // }
+ // impl Foo for () {
+ // fn test<'a, T>(x: &'a T) -> &'a T { x }
+ // }
+ // ```
+ // .. to compile. However, since we use both the normalized and unnormalized
+ // inputs and outputs from the substituted trait signature, we will end up
+ // seeing the hidden type of an RPIT in the signature itself. Naively, this
+ // means that we will use the hidden type to imply the hidden type's own
+ // well-formedness.
+ //
+ // To avoid this, we replace the infer vars used for hidden type inference
+ // with placeholders, which imply nothing about outlives bounds, and then
+ // prove below that the hidden types are well formed.
+ let universe = infcx.create_next_universe();
+ let mut idx = 0;
+ let mapping: FxHashMap<_, _> = collector
+ .types
+ .iter()
+ .map(|(_, &(ty, _))| {
+ assert!(
+ infcx.resolve_vars_if_possible(ty) == ty && ty.is_ty_var(),
+ "{ty:?} should not have been constrained via normalization",
+ ty = infcx.resolve_vars_if_possible(ty)
+ );
+ idx += 1;
+ (
+ ty,
+ Ty::new_placeholder(
+ tcx,
+ ty::Placeholder {
+ universe,
+ bound: ty::BoundTy {
+ var: ty::BoundVar::from_usize(idx),
+ kind: ty::BoundTyKind::Anon,
+ },
+ },
+ ),
+ )
+ })
+ .collect();
+ let mut type_mapper = BottomUpFolder {
+ tcx,
+ ty_op: |ty| *mapping.get(&ty).unwrap_or(&ty),
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ };
let wf_tys = FxIndexSet::from_iter(
- unnormalized_trait_sig.inputs_and_output.iter().chain(trait_sig.inputs_and_output.iter()),
+ unnormalized_trait_sig
+ .inputs_and_output
+ .iter()
+ .chain(trait_sig.inputs_and_output.iter())
+ .map(|ty| ty.fold_with(&mut type_mapper)),
);
match ocx.eq(&cause, param_env, trait_return_ty, impl_return_ty) {
@@ -779,6 +855,20 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
}
}
+ // FIXME: This has the same issue as #108544, but since this isn't breaking
+ // existing code, I'm not particularly inclined to do the same hack as above
+ // where we process wf obligations manually. This can be fixed in a forward-
+ // compatible way later.
+ let collected_types = collector.types;
+ for (_, &(ty, _)) in &collected_types {
+ ocx.register_obligation(traits::Obligation::new(
+ tcx,
+ misc_cause.clone(),
+ param_env,
+ ty::ClauseKind::WellFormed(ty.into()),
+ ));
+ }
+
// Check that all obligations are satisfied by the implementation's
// RPITs.
let errors = ocx.select_all_or_error();
@@ -787,8 +877,6 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
return Err(reported);
}
- let collected_types = collector.types;
-
// Finally, resolve all regions. This catches wily misuses of
// lifetime parameters.
let outlives_env = OutlivesEnvironment::with_bounds(
@@ -1126,7 +1214,10 @@ fn report_trait_method_mismatch<'tcx>(
&mut diag,
&cause,
trait_err_span.map(|sp| (sp, Cow::from("type in trait"))),
- Some(infer::ValuePairs::Sigs(ExpectedFound { expected: trait_sig, found: impl_sig })),
+ Some(infer::ValuePairs::PolySigs(ExpectedFound {
+ expected: ty::Binder::dummy(trait_sig),
+ found: ty::Binder::dummy(impl_sig),
+ })),
terr,
false,
false,
@@ -2188,16 +2279,16 @@ pub(super) fn check_type_bounds<'tcx>(
//
// impl<T> X for T where T: X { type Y = <T as X>::Y; }
}
- _ => predicates.push(
+ _ => predicates.push(ty::Clause::from_projection_clause(
+ tcx,
ty::Binder::bind_with_vars(
ty::ProjectionPredicate {
projection_ty: tcx.mk_alias_ty(trait_ty.def_id, rebased_args),
term: normalize_impl_ty.into(),
},
bound_vars,
- )
- .to_predicate(tcx),
- ),
+ ),
+ )),
};
ty::ParamEnv::new(tcx.mk_clauses(&predicates), Reveal::UserFacing)
};
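One behavioural note from the hunks above: `compare_asyncness` now checks `asyncness(..).is_async()`, and it continues to accept either an `async fn` or a desugared `fn ... -> impl Future` on the impl side. A sketch follows; on this compiler line both forms still sit behind feature gates (`async_fn_in_trait`, `return_position_impl_trait_in_trait`), which later releases stabilize:

```rust
#![feature(async_fn_in_trait, return_position_impl_trait_in_trait)]
use std::future::Future;

trait Fetch {
    async fn fetch(&self) -> u32;
}

struct Direct;
impl Fetch for Direct {
    async fn fetch(&self) -> u32 {
        1
    }
}

struct Desugared;
impl Fetch for Desugared {
    // Also accepted by `compare_asyncness`: not async, but returns a future.
    fn fetch(&self) -> impl Future<Output = u32> {
        async { 2 }
    }
}
```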
diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs
new file mode 100644
index 000000000..d9e0e87eb
--- /dev/null
+++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item/refine.rs
@@ -0,0 +1,332 @@
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::{outlives::env::OutlivesEnvironment, TyCtxtInferExt};
+use rustc_lint_defs::builtin::REFINING_IMPL_TRAIT;
+use rustc_middle::traits::{ObligationCause, Reveal};
+use rustc_middle::ty::{
+ self, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::traits::{
+ elaborate, normalize_param_env_or_error, outlives_bounds::InferCtxtExt, ObligationCtxt,
+};
+use std::ops::ControlFlow;
+
+/// Check that an implementation does not refine an RPITIT from a trait method signature.
+pub(super) fn check_refining_return_position_impl_trait_in_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: ty::AssocItem,
+ trait_m: ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) {
+ if !tcx.impl_method_has_trait_impl_trait_tys(impl_m.def_id) {
+ return;
+ }
+ // crate-private traits don't have any library guarantees, there's no need to do this check.
+ if !tcx.visibility(trait_m.container_id(tcx)).is_public() {
+ return;
+ }
+
+ // If a type in the trait ref is private, then there's also no reason to do this check.
+ let impl_def_id = impl_m.container_id(tcx);
+ for arg in impl_trait_ref.args {
+ if let Some(ty) = arg.as_type()
+ && let Some(self_visibility) = type_visibility(tcx, ty)
+ && !self_visibility.is_public()
+ {
+ return;
+ }
+ }
+
+ let impl_m_args = ty::GenericArgs::identity_for_item(tcx, impl_m.def_id);
+ let trait_m_to_impl_m_args = impl_m_args.rebase_onto(tcx, impl_def_id, impl_trait_ref.args);
+ let bound_trait_m_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_m_to_impl_m_args);
+ let trait_m_sig = tcx.liberate_late_bound_regions(impl_m.def_id, bound_trait_m_sig);
+ // replace the self type of the trait ref with `Self` so that diagnostics render better.
+ let trait_m_sig_with_self_for_diag = tcx.liberate_late_bound_regions(
+ impl_m.def_id,
+ tcx.fn_sig(trait_m.def_id).instantiate(
+ tcx,
+ tcx.mk_args_from_iter(
+ [tcx.types.self_param.into()]
+ .into_iter()
+ .chain(trait_m_to_impl_m_args.iter().skip(1)),
+ ),
+ ),
+ );
+
+ let Ok(hidden_tys) = tcx.collect_return_position_impl_trait_in_trait_tys(impl_m.def_id) else {
+ // Error already emitted, no need to delay another.
+ return;
+ };
+
+ let mut collector = ImplTraitInTraitCollector { tcx, types: FxIndexSet::default() };
+ trait_m_sig.visit_with(&mut collector);
+
+ // Bounds that we find on RPITITs in the trait signature.
+ let mut trait_bounds = vec![];
+ // Bounds that we find on the RPITITs in the impl signature.
+ let mut impl_bounds = vec![];
+
+ for trait_projection in collector.types.into_iter().rev() {
+ let impl_opaque_args = trait_projection.args.rebase_onto(tcx, trait_m.def_id, impl_m_args);
+ let hidden_ty = hidden_tys[&trait_projection.def_id].instantiate(tcx, impl_opaque_args);
+
+ // If the hidden type is not an opaque, then we have "refined" the trait signature.
+ let ty::Alias(ty::Opaque, impl_opaque) = *hidden_ty.kind() else {
+ report_mismatched_rpitit_signature(
+ tcx,
+ trait_m_sig_with_self_for_diag,
+ trait_m.def_id,
+ impl_m.def_id,
+ None,
+ );
+ return;
+ };
+
+ // This opaque also needs to be from the impl method -- otherwise,
+ // it's a refinement to a TAIT.
+ if !tcx.hir().get_if_local(impl_opaque.def_id).map_or(false, |node| {
+ matches!(
+ node.expect_item().expect_opaque_ty().origin,
+ hir::OpaqueTyOrigin::AsyncFn(def_id) | hir::OpaqueTyOrigin::FnReturn(def_id)
+ if def_id == impl_m.def_id.expect_local()
+ )
+ }) {
+ report_mismatched_rpitit_signature(
+ tcx,
+ trait_m_sig_with_self_for_diag,
+ trait_m.def_id,
+ impl_m.def_id,
+ None,
+ );
+ return;
+ }
+
+ trait_bounds.extend(
+ tcx.item_bounds(trait_projection.def_id).iter_instantiated(tcx, trait_projection.args),
+ );
+ impl_bounds.extend(elaborate(
+ tcx,
+ tcx.explicit_item_bounds(impl_opaque.def_id)
+ .iter_instantiated_copied(tcx, impl_opaque.args),
+ ));
+ }
+
+ let hybrid_preds = tcx
+ .predicates_of(impl_def_id)
+ .instantiate_identity(tcx)
+ .into_iter()
+ .chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_m_to_impl_m_args))
+ .map(|(clause, _)| clause);
+ let param_env = ty::ParamEnv::new(tcx.mk_clauses_from_iter(hybrid_preds), Reveal::UserFacing);
+ let param_env = normalize_param_env_or_error(tcx, param_env, ObligationCause::dummy());
+
+ let ref infcx = tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ // Normalize the bounds. This has two purposes:
+ //
+ // 1. Project the RPITIT projections from the trait to the opaques on the impl,
+ // which means that they don't need to be mapped manually.
+ //
+ // 2. Project any other projections that show up in the bound. That makes sure that
+ // we don't consider `tests/ui/async-await/in-trait/async-associated-types.rs`
+ // to be refining.
+ let (trait_bounds, impl_bounds) =
+ ocx.normalize(&ObligationCause::dummy(), param_env, (trait_bounds, impl_bounds));
+
+ // Since we've normalized things, we need to resolve regions, since we'll
+ // possibly have introduced region vars during projection. We don't expect
+ // this resolution to have incurred any region errors -- but if we do, then
+ // just delay a bug.
+ let mut implied_wf_types = FxIndexSet::default();
+ implied_wf_types.extend(trait_m_sig.inputs_and_output);
+ implied_wf_types.extend(ocx.normalize(
+ &ObligationCause::dummy(),
+ param_env,
+ trait_m_sig.inputs_and_output,
+ ));
+ if !ocx.select_all_or_error().is_empty() {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "encountered errors when checking RPITIT refinement (selection)",
+ );
+ return;
+ }
+ let outlives_env = OutlivesEnvironment::with_bounds(
+ param_env,
+ infcx.implied_bounds_tys(param_env, impl_m.def_id.expect_local(), implied_wf_types),
+ );
+ let errors = infcx.resolve_regions(&outlives_env);
+ if !errors.is_empty() {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "encountered errors when checking RPITIT refinement (regions)",
+ );
+ return;
+ }
+ // Resolve any lifetime variables that may have been introduced during normalization.
+ let Ok((trait_bounds, impl_bounds)) = infcx.fully_resolve((trait_bounds, impl_bounds)) else {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "encountered errors when checking RPITIT refinement (resolution)",
+ );
+ return;
+ };
+
+ // For quicker lookup, use an `IndexSet` (we don't use one earlier because
+ // it's not foldable..).
+ // Also, we have to anonymize binders in these types because they may contain
+ // `BrNamed` bound vars, which contain unique `DefId`s which correspond to syntax
+ // locations that we don't care about when checking bound equality.
+ let trait_bounds = FxIndexSet::from_iter(trait_bounds.fold_with(&mut Anonymize { tcx }));
+ let impl_bounds = impl_bounds.fold_with(&mut Anonymize { tcx });
+
+ // Find any clauses that are present in the impl's RPITITs that are not
+ // present in the trait's RPITITs. This will trigger on trivial predicates,
+ // too, since we *do not* use the trait solver to prove that the RPITIT's
+ // bounds are not stronger -- we're doing a simple, syntactic compatibility
+ // check between bounds. This is strictly forwards compatible, though.
+ for (clause, span) in impl_bounds {
+ if !trait_bounds.contains(&clause) {
+ report_mismatched_rpitit_signature(
+ tcx,
+ trait_m_sig_with_self_for_diag,
+ trait_m.def_id,
+ impl_m.def_id,
+ Some(span),
+ );
+ return;
+ }
+ }
+}
+
+struct ImplTraitInTraitCollector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ types: FxIndexSet<ty::AliasTy<'tcx>>,
+}
+
+impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitCollector<'tcx> {
+ type BreakTy = !;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow<Self::BreakTy> {
+ if let ty::Alias(ty::Projection, proj) = *ty.kind()
+ && self.tcx.is_impl_trait_in_trait(proj.def_id)
+ {
+ if self.types.insert(proj) {
+ for (pred, _) in self
+ .tcx
+ .explicit_item_bounds(proj.def_id)
+ .iter_instantiated_copied(self.tcx, proj.args)
+ {
+ pred.visit_with(self)?;
+ }
+ }
+ ControlFlow::Continue(())
+ } else {
+ ty.super_visit_with(self)
+ }
+ }
+}
+
+fn report_mismatched_rpitit_signature<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_m_sig: ty::FnSig<'tcx>,
+ trait_m_def_id: DefId,
+ impl_m_def_id: DefId,
+ unmatched_bound: Option<Span>,
+) {
+ let mapping = std::iter::zip(
+ tcx.fn_sig(trait_m_def_id).skip_binder().bound_vars(),
+ tcx.fn_sig(impl_m_def_id).skip_binder().bound_vars(),
+ )
+ .filter_map(|(impl_bv, trait_bv)| {
+ if let ty::BoundVariableKind::Region(impl_bv) = impl_bv
+ && let ty::BoundVariableKind::Region(trait_bv) = trait_bv
+ {
+ Some((impl_bv, trait_bv))
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ let mut return_ty =
+ trait_m_sig.output().fold_with(&mut super::RemapLateBound { tcx, mapping: &mapping });
+
+ if tcx.asyncness(impl_m_def_id).is_async() && tcx.asyncness(trait_m_def_id).is_async() {
+ let ty::Alias(ty::Projection, future_ty) = return_ty.kind() else {
+ bug!();
+ };
+ let Some(future_output_ty) = tcx
+ .explicit_item_bounds(future_ty.def_id)
+ .iter_instantiated_copied(tcx, future_ty.args)
+ .find_map(|(clause, _)| match clause.kind().no_bound_vars()? {
+ ty::ClauseKind::Projection(proj) => proj.term.ty(),
+ _ => None,
+ })
+ else {
+ bug!()
+ };
+ return_ty = future_output_ty;
+ }
+
+ let (span, impl_return_span, pre, post) =
+ match tcx.hir().get_by_def_id(impl_m_def_id.expect_local()).fn_decl().unwrap().output {
+ hir::FnRetTy::DefaultReturn(span) => (tcx.def_span(impl_m_def_id), span, "-> ", " "),
+ hir::FnRetTy::Return(ty) => (ty.span, ty.span, "", ""),
+ };
+ let trait_return_span =
+ tcx.hir().get_if_local(trait_m_def_id).map(|node| match node.fn_decl().unwrap().output {
+ hir::FnRetTy::DefaultReturn(_) => tcx.def_span(trait_m_def_id),
+ hir::FnRetTy::Return(ty) => ty.span,
+ });
+
+ let span = unmatched_bound.unwrap_or(span);
+ tcx.emit_spanned_lint(
+ REFINING_IMPL_TRAIT,
+ tcx.local_def_id_to_hir_id(impl_m_def_id.expect_local()),
+ span,
+ crate::errors::ReturnPositionImplTraitInTraitRefined {
+ impl_return_span,
+ trait_return_span,
+ pre,
+ post,
+ return_ty,
+ unmatched_bound,
+ },
+ );
+}
+
+fn type_visibility<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<ty::Visibility<DefId>> {
+ match *ty.kind() {
+ ty::Ref(_, ty, _) => type_visibility(tcx, ty),
+ ty::Adt(def, args) => {
+ if def.is_fundamental() {
+ type_visibility(tcx, args.type_at(0))
+ } else {
+ Some(tcx.visibility(def.did()))
+ }
+ }
+ _ => None,
+ }
+}
+
+struct Anonymize<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Anonymize<'tcx> {
+ fn interner(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> ty::Binder<'tcx, T>
+ where
+ T: TypeFoldable<TyCtxt<'tcx>>,
+ {
+ self.tcx.anonymize_bound_vars(t)
+ }
+}
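In user-facing terms, the new `refine.rs` check fires the `refining_impl_trait` lint (the `hir_analysis_rpitit_refined` message earlier in this patch) when an impl's return-position `impl Trait` promises bounds the trait's does not. A hypothetical trigger; on this compiler line the RPITIT syntax itself is still feature-gated:

```rust
#![feature(return_position_impl_trait_in_trait)]

pub trait NewId {
    fn new_id(&self) -> impl Sized;
}

pub struct Counter(u64);

impl NewId for Counter {
    // `Copy` is stronger than the trait's `impl Sized`: the check above lints
    // here and points at the extra bound, suggesting
    // `#[allow(refining_impl_trait)]` if the refinement is intentional API.
    fn new_id(&self) -> impl Sized + Copy {
        self.0
    }
}
```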
diff --git a/compiler/rustc_hir_analysis/src/check/entry.rs b/compiler/rustc_hir_analysis/src/check/entry.rs
index fcaefe026..3cd3f5bcf 100644
--- a/compiler/rustc_hir_analysis/src/check/entry.rs
+++ b/compiler/rustc_hir_analysis/src/check/entry.rs
@@ -11,8 +11,8 @@ use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
use std::ops::Not;
+use super::check_function_signature;
use crate::errors;
-use crate::require_same_types;
pub(crate) fn check_for_entry_fn(tcx: TyCtxt<'_>) {
match tcx.entry_fn(()) {
@@ -112,7 +112,7 @@ fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
}
let main_asyncness = tcx.asyncness(main_def_id);
- if let hir::IsAsync::Async = main_asyncness {
+ if main_asyncness.is_async() {
let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
tcx.sess.emit_err(errors::MainFunctionAsync { span: main_span, asyncness: asyncness_span });
error = true;
@@ -162,33 +162,33 @@ fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
error = true;
}
// now we can take the return type of the given main function
- expected_return_type = main_fnsig.output();
+ expected_return_type = norm_return_ty;
} else {
// standard () main return type
- expected_return_type = ty::Binder::dummy(Ty::new_unit(tcx));
+ expected_return_type = tcx.types.unit;
}
if error {
return;
}
- let se_ty = Ty::new_fn_ptr(
- tcx,
- expected_return_type.map_bound(|expected_return_type| {
- tcx.mk_fn_sig([], expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
- }),
- );
+ let expected_sig = ty::Binder::dummy(tcx.mk_fn_sig(
+ [],
+ expected_return_type,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
- require_same_types(
+ check_function_signature(
tcx,
- &ObligationCause::new(
+ ObligationCause::new(
main_span,
main_diagnostics_def_id,
ObligationCauseCode::MainFunctionType,
),
- param_env,
- se_ty,
- Ty::new_fn_ptr(tcx, main_fnsig),
+ main_def_id,
+ expected_sig,
);
}
@@ -212,7 +212,7 @@ fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
});
error = true;
}
- if let hir::IsAsync::Async = sig.header.asyncness {
+ if sig.header.asyncness.is_async() {
let span = tcx.def_span(it.owner_id);
tcx.sess.emit_err(errors::StartAsync { span: span });
error = true;
@@ -247,27 +247,23 @@ fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
}
}
- let se_ty = Ty::new_fn_ptr(
- tcx,
- ty::Binder::dummy(tcx.mk_fn_sig(
- [tcx.types.isize, Ty::new_imm_ptr(tcx, Ty::new_imm_ptr(tcx, tcx.types.u8))],
- tcx.types.isize,
- false,
- hir::Unsafety::Normal,
- Abi::Rust,
- )),
- );
+ let expected_sig = ty::Binder::dummy(tcx.mk_fn_sig(
+ [tcx.types.isize, Ty::new_imm_ptr(tcx, Ty::new_imm_ptr(tcx, tcx.types.u8))],
+ tcx.types.isize,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
- require_same_types(
+ check_function_signature(
tcx,
- &ObligationCause::new(
+ ObligationCause::new(
start_span,
start_def_id,
ObligationCauseCode::StartFunctionType,
),
- ty::ParamEnv::empty(), // start should not have any where bounds.
- se_ty,
- Ty::new_fn_ptr(tcx, tcx.fn_sig(start_def_id).instantiate_identity()),
+ start_def_id.into(),
+ expected_sig,
);
}
_ => {
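
Net effect of the `entry.rs` change: `main`'s actual signature (from `tcx.fn_sig`) is equated against an expected `fn() -> T` signature via `check_function_signature`, instead of comparing two synthesized function-pointer types; user-visible behaviour is intended to be unchanged. A small stable-Rust illustration of what the check accepts:

use std::num::ParseIntError;

// `main` may return any type accepted by the `Termination` machinery; the
// expected signature built above has no inputs and that return type.
fn main() -> Result<(), ParseIntError> {
    let n: i32 = "42".parse()?;
    assert_eq!(n, 42);
    Ok(())
}

// By contrast, something like `fn main(_: i32) {}` is rejected by this same
// check, since the expected signature takes no arguments.
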
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index f89e2e5c2..c61719c1f 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -1,11 +1,11 @@
//! Type-checking for the rust-intrinsic and platform-intrinsic
//! intrinsics that the compiler exposes.
+use crate::check::check_function_signature;
use crate::errors::{
UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction,
WrongNumberOfGenericArgumentsToIntrinsic,
};
-use crate::require_same_types;
use hir::def_id::DefId;
use rustc_errors::{struct_span_err, DiagnosticMessage};
@@ -20,6 +20,7 @@ fn equate_intrinsic_type<'tcx>(
it: &hir::ForeignItem<'_>,
n_tps: usize,
n_lts: usize,
+ n_cts: usize,
sig: ty::PolyFnSig<'tcx>,
) {
let (own_counts, span) = match &it.kind {
@@ -51,17 +52,14 @@ fn equate_intrinsic_type<'tcx>(
if gen_count_ok(own_counts.lifetimes, n_lts, "lifetime")
&& gen_count_ok(own_counts.types, n_tps, "type")
- && gen_count_ok(own_counts.consts, 0, "const")
+ && gen_count_ok(own_counts.consts, n_cts, "const")
{
- let fty = Ty::new_fn_ptr(tcx, sig);
let it_def_id = it.owner_id.def_id;
- let cause = ObligationCause::new(it.span, it_def_id, ObligationCauseCode::IntrinsicType);
- require_same_types(
+ check_function_signature(
tcx,
- &cause,
- ty::ParamEnv::empty(), // FIXME: do all intrinsics have an empty param env?
- Ty::new_fn_ptr(tcx, tcx.fn_sig(it.owner_id).instantiate_identity()),
- fty,
+ ObligationCause::new(it.span, it_def_id, ObligationCauseCode::IntrinsicType),
+ it_def_id.into(),
+ sig,
);
}
}
@@ -140,7 +138,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let name_str = intrinsic_name.as_str();
let bound_vars = tcx.mk_bound_variable_kinds(&[
- ty::BoundVariableKind::Region(ty::BrAnon(None)),
+ ty::BoundVariableKind::Region(ty::BrAnon),
ty::BoundVariableKind::Region(ty::BrEnv),
]);
let mk_va_list_ty = |mutbl| {
@@ -148,7 +146,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
let region = ty::Region::new_late_bound(
tcx,
ty::INNERMOST,
- ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) },
+ ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
);
let env_region = ty::Region::new_late_bound(
tcx,
@@ -408,7 +406,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
);
let discriminant_def_id = assoc_items[0];
- let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) };
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
(
1,
vec![Ty::new_imm_ref(
@@ -466,7 +464,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
}
sym::raw_eq => {
- let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(None) };
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon };
let param_ty = Ty::new_imm_ref(
tcx,
ty::Region::new_late_bound(tcx, ty::INNERMOST, br),
@@ -492,7 +490,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
};
let sig = tcx.mk_fn_sig(inputs, output, false, unsafety, Abi::RustIntrinsic);
let sig = ty::Binder::bind_with_vars(sig, bound_vars);
- equate_intrinsic_type(tcx, it, n_tps, n_lts, sig)
+ equate_intrinsic_type(tcx, it, n_tps, n_lts, 0, sig)
}
/// Type-check `extern "platform-intrinsic" { ... }` functions.
@@ -504,9 +502,9 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
let name = it.ident.name;
- let (n_tps, inputs, output) = match name {
+ let (n_tps, n_cts, inputs, output) = match name {
sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
- (2, vec![param(0), param(0)], param(1))
+ (2, 0, vec![param(0), param(0)], param(1))
}
sym::simd_add
| sym::simd_sub
@@ -522,8 +520,8 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
| sym::simd_fmax
| sym::simd_fpow
| sym::simd_saturating_add
- | sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)),
- sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)),
+ | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)),
+ sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)),
sym::simd_neg
| sym::simd_bswap
| sym::simd_bitreverse
@@ -541,25 +539,25 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
| sym::simd_ceil
| sym::simd_floor
| sym::simd_round
- | sym::simd_trunc => (1, vec![param(0)], param(0)),
- sym::simd_fpowi => (1, vec![param(0), tcx.types.i32], param(0)),
- sym::simd_fma => (1, vec![param(0), param(0), param(0)], param(0)),
- sym::simd_gather => (3, vec![param(0), param(1), param(2)], param(0)),
- sym::simd_scatter => (3, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)),
- sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)),
- sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)),
+ | sym::simd_trunc => (1, 0, vec![param(0)], param(0)),
+ sym::simd_fpowi => (1, 0, vec![param(0), tcx.types.i32], param(0)),
+ sym::simd_fma => (1, 0, vec![param(0), param(0), param(0)], param(0)),
+ sym::simd_gather => (3, 0, vec![param(0), param(1), param(2)], param(0)),
+ sym::simd_scatter => (3, 0, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)),
+ sym::simd_insert => (2, 0, vec![param(0), tcx.types.u32, param(1)], param(0)),
+ sym::simd_extract => (2, 0, vec![param(0), tcx.types.u32], param(1)),
sym::simd_cast
| sym::simd_as
| sym::simd_cast_ptr
| sym::simd_expose_addr
- | sym::simd_from_exposed_addr => (2, vec![param(0)], param(1)),
- sym::simd_bitmask => (2, vec![param(0)], param(1)),
+ | sym::simd_from_exposed_addr => (2, 0, vec![param(0)], param(1)),
+ sym::simd_bitmask => (2, 0, vec![param(0)], param(1)),
sym::simd_select | sym::simd_select_bitmask => {
- (2, vec![param(0), param(1), param(1)], param(1))
+ (2, 0, vec![param(0), param(1), param(1)], param(1))
}
- sym::simd_reduce_all | sym::simd_reduce_any => (1, vec![param(0)], tcx.types.bool),
+ sym::simd_reduce_all | sym::simd_reduce_any => (1, 0, vec![param(0)], tcx.types.bool),
sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
- (2, vec![param(0), param(1)], param(1))
+ (2, 0, vec![param(0), param(1)], param(1))
}
sym::simd_reduce_add_unordered
| sym::simd_reduce_mul_unordered
@@ -569,8 +567,9 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
| sym::simd_reduce_min
| sym::simd_reduce_max
| sym::simd_reduce_min_nanless
- | sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)),
- sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)),
+ | sym::simd_reduce_max_nanless => (2, 0, vec![param(0)], param(1)),
+ sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)),
+ sym::simd_shuffle_generic => (2, 1, vec![param(0), param(0)], param(1)),
_ => {
let msg = format!("unrecognized platform-specific intrinsic function: `{name}`");
tcx.sess.struct_span_err(it.span, msg).emit();
@@ -580,5 +579,5 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>)
let sig = tcx.mk_fn_sig(inputs, output, false, hir::Unsafety::Unsafe, Abi::PlatformIntrinsic);
let sig = ty::Binder::dummy(sig);
- equate_intrinsic_type(tcx, it, n_tps, 0, sig)
+ equate_intrinsic_type(tcx, it, n_tps, 0, n_cts, sig)
}
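
The new `n_cts` column records how many const generic parameters a platform intrinsic may declare; `simd_shuffle_generic` is its first user (two type parameters, one const parameter). A hedged sketch of the declaration shape this count check expects -- nightly-only, and the exact const parameter type used by the standard library may differ:

#![feature(platform_intrinsics, adt_const_params)]
#![allow(incomplete_features, dead_code)]

extern "platform-intrinsic" {
    // Two type parameters, one const parameter, inputs `[param(0), param(0)]`,
    // output `param(1)` -- matching the `(2, 1, ...)` row added above.
    fn simd_shuffle_generic<T, U, const IDX: &'static [u32]>(x: T, y: T) -> U;
}

fn main() {}
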
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
index 945953edd..cd7e99172 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs
@@ -44,20 +44,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
false
}
- fn check_asm_operand_type(
- &self,
- idx: usize,
- reg: InlineAsmRegOrRegClass,
- expr: &'tcx hir::Expr<'tcx>,
- template: &[InlineAsmTemplatePiece],
- is_input: bool,
- tied_input: Option<(&'tcx hir::Expr<'tcx>, Option<InlineAsmType>)>,
- target_features: &FxIndexSet<Symbol>,
- ) -> Option<InlineAsmType> {
- let ty = (self.get_operand_ty)(expr);
- if ty.has_non_region_infer() {
- bug!("inference variable in asm operand ty: {:?} {:?}", expr, ty);
- }
+ fn get_asm_ty(&self, ty: Ty<'tcx>) -> Option<InlineAsmType> {
let asm_ty_isize = match self.tcx.sess.target.pointer_width {
16 => InlineAsmType::I16,
32 => InlineAsmType::I32,
@@ -65,10 +52,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
_ => unreachable!(),
};
- let asm_ty = match *ty.kind() {
- // `!` is allowed for input but not for output (issue #87802)
- ty::Never if is_input => return None,
- _ if ty.references_error() => return None,
+ match *ty.kind() {
ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
@@ -99,7 +83,6 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
};
match ty.kind() {
- ty::Never | ty::Error(_) => return None,
ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::VecI8(size)),
ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => {
Some(InlineAsmType::VecI16(size))
@@ -128,6 +111,38 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
}
ty::Infer(_) => unreachable!(),
_ => None,
+ }
+ }
+
+ fn check_asm_operand_type(
+ &self,
+ idx: usize,
+ reg: InlineAsmRegOrRegClass,
+ expr: &'tcx hir::Expr<'tcx>,
+ template: &[InlineAsmTemplatePiece],
+ is_input: bool,
+ tied_input: Option<(&'tcx hir::Expr<'tcx>, Option<InlineAsmType>)>,
+ target_features: &FxIndexSet<Symbol>,
+ ) -> Option<InlineAsmType> {
+ let ty = (self.get_operand_ty)(expr);
+ if ty.has_non_region_infer() {
+ bug!("inference variable in asm operand ty: {:?} {:?}", expr, ty);
+ }
+
+ let asm_ty = match *ty.kind() {
+ // `!` is allowed for input but not for output (issue #87802)
+ ty::Never if is_input => return None,
+ _ if ty.references_error() => return None,
+ ty::Adt(adt, args) if Some(adt.did()) == self.tcx.lang_items().maybe_uninit() => {
+ let fields = &adt.non_enum_variant().fields;
+ let ty = fields[FieldIdx::from_u32(1)].ty(self.tcx, args);
+ let ty::Adt(ty, args) = ty.kind() else { unreachable!() };
+ assert!(ty.is_manually_drop());
+ let fields = &ty.non_enum_variant().fields;
+ let ty = fields[FieldIdx::from_u32(0)].ty(self.tcx, args);
+ self.get_asm_ty(ty)
+ }
+ _ => self.get_asm_ty(ty),
};
let Some(asm_ty) = asm_ty else {
let msg = format!("cannot use value of type `{ty}` for inline assembly");
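
User-visible effect of this `intrinsicck.rs` hunk: an `asm!` operand of type `MaybeUninit<T>` is now classified by looking through its `ManuallyDrop<T>` field to `T`, so uninitialized outputs can be passed directly. A hedged, x86_64-only sketch, assuming (as the hunk suggests) no extra feature gate is involved:

use std::arch::asm;
use std::mem::MaybeUninit;

#[cfg(target_arch = "x86_64")]
fn forty_two() -> u64 {
    let mut out = MaybeUninit::<u64>::uninit();
    unsafe {
        // `out` is a MaybeUninit<u64>; with the change above it is treated
        // like a plain u64 for register-class checking.
        asm!("mov {0}, 42", out(reg) out);
        out.assume_init()
    }
}

#[cfg(target_arch = "x86_64")]
fn main() {
    assert_eq!(forty_two(), 42);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
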
diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs
index 4cf358732..5fa65f33c 100644
--- a/compiler/rustc_hir_analysis/src/check/mod.rs
+++ b/compiler/rustc_hir_analysis/src/check/mod.rs
@@ -73,23 +73,31 @@ pub mod wfcheck;
pub use check::check_abi;
+use std::num::NonZeroU32;
+
use check::check_mod_item_types;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{pluralize, struct_span_err, Diagnostic, DiagnosticBuilder};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::intravisit::Visitor;
use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::error_reporting::ObligationCauseExt as _;
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{self, TyCtxtInferExt as _};
+use rustc_infer::traits::ObligationCause;
use rustc_middle::query::Providers;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{GenericArgs, GenericArgsRef};
use rustc_session::parse::feature_err;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::{kw, Ident};
-use rustc_span::{self, BytePos, Span, Symbol};
+use rustc_span::{self, def_id::CRATE_DEF_ID, BytePos, Span, Symbol};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor;
-use std::num::NonZeroU32;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
+use rustc_trait_selection::traits::ObligationCtxt;
use crate::errors;
use crate::require_c_abi_if_c_variadic;
@@ -289,6 +297,7 @@ fn default_body_is_unstable(
&tcx.sess.parse_sess,
feature,
rustc_feature::GateIssue::Library(issue),
+ false,
);
err.emit();
@@ -320,41 +329,52 @@ fn bounds_from_generic_predicates<'tcx>(
_ => {}
}
}
- let generics = if types.is_empty() {
- "".to_string()
- } else {
- format!(
- "<{}>",
- types
- .keys()
- .filter_map(|t| match t.kind() {
- ty::Param(_) => Some(t.to_string()),
- // Avoid suggesting the following:
- // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
- _ => None,
- })
- .collect::<Vec<_>>()
- .join(", ")
- )
- };
+
let mut where_clauses = vec![];
+ let mut types_str = vec![];
for (ty, bounds) in types {
- where_clauses
- .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))));
- }
- for projection in &projections {
- let p = projection.skip_binder();
- // FIXME: this is not currently supported syntax, we should be looking at the `types` and
- // insert the associated types where they correspond, but for now let's be "lazy" and
- // propose this instead of the following valid resugaring:
- // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>`
- where_clauses.push(format!("{} = {}", tcx.def_path_str(p.projection_ty.def_id), p.term));
+ if let ty::Param(_) = ty.kind() {
+ let mut bounds_str = vec![];
+ for bound in bounds {
+ let mut projections_str = vec![];
+ for projection in &projections {
+ let p = projection.skip_binder();
+ let alias_ty = p.projection_ty;
+ if bound == tcx.parent(alias_ty.def_id) && alias_ty.self_ty() == ty {
+ let name = tcx.item_name(alias_ty.def_id);
+ projections_str.push(format!("{} = {}", name, p.term));
+ }
+ }
+ let bound_def_path = tcx.def_path_str(bound);
+ if projections_str.is_empty() {
+ where_clauses.push(format!("{}: {}", ty, bound_def_path));
+ } else {
+ bounds_str.push(format!("{}<{}>", bound_def_path, projections_str.join(", ")));
+ }
+ }
+ if bounds_str.is_empty() {
+ types_str.push(ty.to_string());
+ } else {
+ types_str.push(format!("{}: {}", ty, bounds_str.join(" + ")));
+ }
+ } else {
+ // Avoid suggesting the following:
+ // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
+ where_clauses.extend(
+ bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))),
+ );
+ }
}
+
+ let generics =
+ if types_str.is_empty() { "".to_string() } else { format!("<{}>", types_str.join(", ")) };
+
let where_clauses = if where_clauses.is_empty() {
- String::new()
+ "".to_string()
} else {
format!(" where {}", where_clauses.join(", "))
};
+
(generics, where_clauses)
}
@@ -545,3 +565,76 @@ fn bad_non_zero_sized_fields<'tcx>(
pub fn potentially_plural_count(count: usize, word: &str) -> String {
format!("{} {}{}", count, word, pluralize!(count))
}
+
+pub fn check_function_signature<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut cause: ObligationCause<'tcx>,
+ fn_id: DefId,
+ expected_sig: ty::PolyFnSig<'tcx>,
+) {
+ let local_id = fn_id.as_local().unwrap_or(CRATE_DEF_ID);
+
+ let param_env = ty::ParamEnv::empty();
+
+ let infcx = &tcx.infer_ctxt().build();
+ let ocx = ObligationCtxt::new(infcx);
+
+ let actual_sig = tcx.fn_sig(fn_id).instantiate_identity();
+
+ let norm_cause = ObligationCause::misc(cause.span, local_id);
+ let actual_sig = ocx.normalize(&norm_cause, param_env, actual_sig);
+
+ match ocx.eq(&cause, param_env, expected_sig, actual_sig) {
+ Ok(()) => {
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors);
+ return;
+ }
+ }
+ Err(err) => {
+ let err_ctxt = infcx.err_ctxt();
+ if fn_id.is_local() {
+ cause.span = extract_span_for_error_reporting(tcx, err, &cause, local_id);
+ }
+ let failure_code = cause.as_failure_code_diag(err, cause.span, vec![]);
+ let mut diag = tcx.sess.create_err(failure_code);
+ err_ctxt.note_type_err(
+ &mut diag,
+ &cause,
+ None,
+ Some(infer::ValuePairs::PolySigs(ExpectedFound {
+ expected: expected_sig,
+ found: actual_sig,
+ })),
+ err,
+ false,
+ false,
+ );
+ diag.emit();
+ return;
+ }
+ }
+
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ let _ = ocx.resolve_regions_and_report_errors(local_id, &outlives_env);
+
+ fn extract_span_for_error_reporting<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ err: TypeError<'_>,
+ cause: &ObligationCause<'tcx>,
+ fn_id: LocalDefId,
+ ) -> rustc_span::Span {
+ let mut args = {
+ let node = tcx.hir().expect_owner(fn_id);
+ let decl = node.fn_decl().unwrap_or_else(|| bug!("expected fn decl, found {:?}", node));
+ decl.inputs.iter().map(|t| t.span).chain(std::iter::once(decl.output.span()))
+ };
+
+ match err {
+ TypeError::ArgumentMutability(i)
+ | TypeError::ArgumentSorts(ExpectedFound { .. }, i) => args.nth(i).unwrap(),
+ _ => cause.span(),
+ }
+ }
+}
diff --git a/compiler/rustc_hir_analysis/src/check/region.rs b/compiler/rustc_hir_analysis/src/check/region.rs
index 5bd6fcb96..463fab93e 100644
--- a/compiler/rustc_hir_analysis/src/check/region.rs
+++ b/compiler/rustc_hir_analysis/src/check/region.rs
@@ -149,7 +149,7 @@ fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx h
// From now on, we continue normally.
visitor.cx = prev_cx;
}
- hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => {
+ hir::StmtKind::Local(..) => {
// Each declaration introduces a subscope for bindings
// introduced by the declaration; this subscope covers a
// suffix of the block. Each subscope in a block has the
@@ -163,6 +163,10 @@ fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx h
visitor.cx.var_parent = visitor.cx.parent;
visitor.visit_stmt(statement)
}
+ hir::StmtKind::Item(..) => {
+ // Don't create scopes for items, since they won't be
+ // lowered to THIR and MIR.
+ }
hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => visitor.visit_stmt(statement),
}
}
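
The `region.rs` hunk stops creating a region scope for item statements: a nested item introduces no bindings in the enclosing block and is not lowered to THIR/MIR as part of it. Plain illustration in stable Rust:

fn outer() -> usize {
    // An item statement: no bindings, nothing to drop, so (after the change
    // above) it gets no scope of its own.
    fn helper(n: usize) -> usize {
        n + 1
    }

    // A `Local` statement still opens a subscope covering the rest of the block.
    let v = vec![1, 2, 3];
    helper(v.len())
}

fn main() {
    assert_eq!(outer(), 4);
}
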
diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
index f5beefc47..77614a9a4 100644
--- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs
+++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs
@@ -24,11 +24,15 @@ use rustc_span::symbol::{sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::traits::misc::{
+ type_allowed_to_implement_const_param_ty, ConstParamTyImplementationError,
+};
use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_trait_selection::traits::{
self, ObligationCause, ObligationCauseCode, ObligationCtxt, WellFormedLoc,
};
+use rustc_type_ir::TypeFlags;
use std::cell::LazyCell;
use std::ops::{ControlFlow, Deref};
@@ -246,9 +250,7 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
// `ForeignItem`s are handled separately.
hir::ItemKind::ForeignMod { .. } => {}
hir::ItemKind::TyAlias(hir_ty, ast_generics) => {
- if tcx.features().lazy_type_alias
- || tcx.type_of(item.owner_id).skip_binder().has_opaque_types()
- {
+ if tcx.type_alias_is_lazy(item.owner_id) {
// Bounds of lazy type aliases and of eager ones that contain opaque types are respected.
// E.g: `type X = impl Trait;`, `type X = (impl Trait, Y);`.
check_item_type(tcx, def_id, hir_ty.span, UnsizedHandling::Allow);
@@ -867,43 +869,65 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
);
});
} else {
- let err_ty_str;
- let mut is_ptr = true;
-
- let err = match ty.kind() {
+ let diag = match ty.kind() {
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Error(_) => None,
- ty::FnPtr(_) => Some("function pointers"),
- ty::RawPtr(_) => Some("raw pointers"),
- _ => {
- is_ptr = false;
- err_ty_str = format!("`{ty}`");
- Some(err_ty_str.as_str())
- }
+ ty::FnPtr(_) => Some(tcx.sess.struct_span_err(
+ hir_ty.span,
+ "using function pointers as const generic parameters is forbidden",
+ )),
+ ty::RawPtr(_) => Some(tcx.sess.struct_span_err(
+ hir_ty.span,
+ "using raw pointers as const generic parameters is forbidden",
+ )),
+ _ => Some(tcx.sess.struct_span_err(
+ hir_ty.span,
+ format!("`{}` is forbidden as the type of a const generic parameter", ty),
+ )),
};
- if let Some(unsupported_type) = err {
- if is_ptr {
- tcx.sess.span_err(
- hir_ty.span,
- format!(
- "using {unsupported_type} as const generic parameters is forbidden",
- ),
- );
- } else {
- let mut err = tcx.sess.struct_span_err(
- hir_ty.span,
- format!(
- "{unsupported_type} is forbidden as the type of a const generic parameter",
- ),
- );
- err.note("the only supported types are integers, `bool` and `char`");
- if tcx.sess.is_nightly_build() {
- err.help(
- "more complex types are supported with `#![feature(adt_const_params)]`",
- );
+ if let Some(mut diag) = diag {
+ diag.note("the only supported types are integers, `bool` and `char`");
+
+ let cause = ObligationCause::misc(hir_ty.span, param.def_id);
+ let may_suggest_feature = match type_allowed_to_implement_const_param_ty(
+ tcx,
+ tcx.param_env(param.def_id),
+ ty,
+ cause,
+ ) {
+ // Can never implement `ConstParamTy`; don't suggest anything.
+ Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => false,
+ // May be able to implement `ConstParamTy`. Only emit the feature help
+ // if the type is local, since the user may be able to fix the local type.
+ Err(ConstParamTyImplementationError::InfrigingFields(..)) => {
+ fn ty_is_local(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Adt(adt_def, ..) => adt_def.did().is_local(),
+ // Arrays and slices use the inner type's `ConstParamTy`.
+ ty::Array(ty, ..) => ty_is_local(*ty),
+ ty::Slice(ty) => ty_is_local(*ty),
+ // `&` references use the inner type's `ConstParamTy`.
+ // `&mut` are not supported.
+ ty::Ref(_, ty, ast::Mutability::Not) => ty_is_local(*ty),
+ // Say that a tuple is local if any of its components are local.
+ // This is not strictly correct, but it's likely that the user can fix the local component.
+ ty::Tuple(tys) => tys.iter().any(|ty| ty_is_local(ty)),
+ _ => false,
+ }
+ }
+
+ ty_is_local(ty)
}
- err.emit();
+ // Implements `ConstParamTy`; suggest adding the feature to enable it.
+ Ok(..) => true,
+ };
+ if may_suggest_feature && tcx.sess.is_nightly_build() {
+ diag.help(
+ "add `#![feature(adt_const_params)]` to the crate attributes to enable more complex and user defined types",
+ );
}
+
+ diag.emit();
}
}
}
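
The rewritten diagnostic path decides whether to mention `adt_const_params` by asking `type_allowed_to_implement_const_param_ty` and, for infringing fields, whether the type is local enough for the user to fix. A hedged nightly sketch of the code the suggestion steers users toward (derive details may differ across nightlies):

#![feature(adt_const_params)]
#![allow(incomplete_features)]

use std::marker::ConstParamTy;

// Deriving `ConstParamTy` (together with the structural-equality traits it
// requires) is what makes `Mode` legal as the type of a const parameter.
#[derive(PartialEq, Eq, ConstParamTy)]
enum Mode {
    Fast,
    Precise,
}

fn run<const M: Mode>() -> &'static str {
    match M {
        Mode::Fast => "fast",
        Mode::Precise => "precise",
    }
}

fn main() {
    // Without the feature and the derive, using `Mode` here would hit the
    // "`Mode` is forbidden as the type of a const generic parameter" error
    // emitted above.
    assert_eq!(run::<{ Mode::Fast }>(), "fast");
}
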
@@ -1255,7 +1279,7 @@ fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id
let is_our_default = |def: &ty::GenericParamDef| match def.kind {
GenericParamDefKind::Type { has_default, .. }
- | GenericParamDefKind::Const { has_default } => {
+ | GenericParamDefKind::Const { has_default, .. } => {
has_default && def.index >= generics.parent_count as u32
}
GenericParamDefKind::Lifetime => unreachable!(),
@@ -1711,10 +1735,8 @@ fn check_variances_for_type_defn<'tcx>(
}
}
ItemKind::TyAlias(..) => {
- let ty = tcx.type_of(item.owner_id).instantiate_identity();
-
- if tcx.features().lazy_type_alias || ty.has_opaque_types() {
- if ty.references_error() {
+ if tcx.type_alias_is_lazy(item.owner_id) {
+ if tcx.type_of(item.owner_id).skip_binder().references_error() {
return;
}
} else {
@@ -1755,6 +1777,8 @@ fn check_variances_for_type_defn<'tcx>(
.collect::<FxHashSet<_>>()
});
+ let ty_generics = tcx.generics_of(item.owner_id);
+
for (index, _) in variances.iter().enumerate() {
let parameter = Parameter(index as u32);
@@ -1762,13 +1786,27 @@ fn check_variances_for_type_defn<'tcx>(
continue;
}
- let param = &hir_generics.params[index];
+ let ty_param = &ty_generics.params[index];
+ let hir_param = &hir_generics.params[index];
+
+ if ty_param.def_id != hir_param.def_id.into() {
+ // Valid programs always have lifetimes before types in the generic parameter list.
+ // ty_generics are normalized to be in this required order, and variances are built
+ // from ty generics, not from hir generics, but we need the hir generics to get
+ // a span out.
+ //
+ // If they aren't in the same order, then the user has written invalid code and has
+ // already gotten an error about it.
+ tcx.sess
+ .delay_span_bug(hir_param.span, "hir generics and ty generics in different order");
+ continue;
+ }
- match param.name {
+ match hir_param.name {
hir::ParamName::Error => {}
_ => {
let has_explicit_bounds = explicitly_bounded_params.contains(&parameter);
- report_bivariance(tcx, param, has_explicit_bounds);
+ report_bivariance(tcx, hir_param, has_explicit_bounds);
}
}
}
@@ -1825,7 +1863,7 @@ impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
continue;
}
// Match the existing behavior.
- if pred.is_global() && !pred.has_late_bound_vars() {
+ if pred.is_global() && !pred.has_type_flags(TypeFlags::HAS_BINDER_VARS) {
let pred = self.normalize(span, None, pred);
let hir_node = tcx.hir().find_by_def_id(self.body_def_id);
diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
index c930537d4..be70acfc3 100644
--- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs
@@ -1,11 +1,10 @@
//! Check properties that are required by built-in traits and set
//! up data structures required by type-checking/codegen.
-use crate::errors::{
- ConstParamTyImplOnNonAdt, CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem,
-};
+use crate::errors;
+
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{struct_span_err, ErrorGuaranteed, MultiSpan};
+use rustc_errors::{ErrorGuaranteed, MultiSpan};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::lang_items::LangItem;
@@ -65,7 +64,7 @@ fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
let impl_ = tcx.hir().expect_item(impl_did).expect_impl();
- tcx.sess.emit_err(DropImplOnWrongItem { span: impl_.self_ty.span });
+ tcx.sess.emit_err(errors::DropImplOnWrongItem { span: impl_.self_ty.span });
}
fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
@@ -91,10 +90,10 @@ fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
infringing_fields_error(tcx, fields, LangItem::Copy, impl_did, span);
}
Err(CopyImplementationError::NotAnAdt) => {
- tcx.sess.emit_err(CopyImplOnNonAdt { span });
+ tcx.sess.emit_err(errors::CopyImplOnNonAdt { span });
}
Err(CopyImplementationError::HasDestructor) => {
- tcx.sess.emit_err(CopyImplOnTypeWithDtor { span });
+ tcx.sess.emit_err(errors::CopyImplOnTypeWithDtor { span });
}
}
}
@@ -117,7 +116,7 @@ fn visit_implementation_of_const_param_ty(tcx: TyCtxt<'_>, impl_did: LocalDefId)
infringing_fields_error(tcx, fields, LangItem::ConstParamTy, impl_did, span);
}
Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => {
- tcx.sess.emit_err(ConstParamTyImplOnNonAdt { span });
+ tcx.sess.emit_err(errors::ConstParamTyImplOnNonAdt { span });
}
}
}
@@ -152,11 +151,17 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let param_env = tcx.param_env(impl_did);
- let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg);
-
let infcx = tcx.infer_ctxt().build();
let cause = ObligationCause::misc(span, impl_did);
+ // Later parts of the compiler rely on all DispatchFromDyn types to be ABI-compatible with raw
+ // pointers. This is enforced here: we only allow impls for references, raw pointers, and things
+ // that are effectively repr(transparent) newtypes around types that already have a
+ // DispatchFromDyn impl. We cannot literally use repr(transparent) on those types since some
+ // of them support an allocator, but we ensure that for the cases where the type implements this
+ // trait, they *do* satisfy the repr(transparent) rules, and then we assume that everything else
+ // in the compiler (in particular, all the call ABI logic) will treat them as repr(transparent)
+ // even if they do not carry that attribute.
use rustc_type_ir::sty::TyKind::*;
match (source.kind(), target.kind()) {
(&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
@@ -168,22 +173,19 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let source_path = tcx.def_path_str(def_a.did());
let target_path = tcx.def_path_str(def_b.did());
- create_err(&format!(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with the same \
- definition; expected `{source_path}`, found `{target_path}`",
- ))
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynCoercion {
+ span,
+ trait_name: "DispatchFromDyn",
+ note: true,
+ source_path,
+ target_path,
+ });
return;
}
if def_a.repr().c() || def_a.repr().packed() {
- create_err(
- "structs implementing `DispatchFromDyn` may not have \
- `#[repr(packed)]` or `#[repr(C)]`",
- )
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynRepr { span });
}
let fields = &def_a.non_enum_variant().fields;
@@ -195,8 +197,8 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
let ty_b = field.ty(tcx, args_b);
if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
- if layout.is_zst() && layout.align.abi.bytes() == 1 {
- // ignore ZST fields with alignment of 1 byte
+ if layout.is_1zst() {
+ // ignore 1-ZST fields
return false;
}
}
@@ -205,16 +207,11 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, ty_a, ty_b)
{
if ok.obligations.is_empty() {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for structs containing the field being coerced, \
- ZST fields with 1 byte alignment, and nothing else",
- )
- .note(format!(
- "extra field `{}` of type `{}` is not allowed",
- field.name, ty_a,
- ))
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynZST {
+ span,
+ name: field.name,
+ ty: ty_a,
+ });
return false;
}
@@ -225,36 +222,29 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
.collect::<Vec<_>>();
if coerced_fields.is_empty() {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with a single field \
- being coerced, none found",
- )
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynSingle {
+ span,
+ trait_name: "DispatchFromDyn",
+ note: true,
+ });
} else if coerced_fields.len() > 1 {
- create_err("implementing the `DispatchFromDyn` trait requires multiple coercions")
- .note(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures with a single field \
- being coerced",
- )
- .note(format!(
- "currently, {} fields need coercions: {}",
- coerced_fields.len(),
- coerced_fields
- .iter()
- .map(|field| {
- format!(
- "`{}` (`{}` to `{}`)",
- field.name,
- field.ty(tcx, args_a),
- field.ty(tcx, args_b),
- )
- })
- .collect::<Vec<_>>()
- .join(", ")
- ))
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynMulti {
+ span,
+ coercions_note: true,
+ number: coerced_fields.len(),
+ coercions: coerced_fields
+ .iter()
+ .map(|field| {
+ format!(
+ "`{}` (`{}` to `{}`)",
+ field.name,
+ field.ty(tcx, args_a),
+ field.ty(tcx, args_b),
+ )
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ });
} else {
let ocx = ObligationCtxt::new(&infcx);
for field in coerced_fields {
@@ -280,11 +270,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef
}
}
_ => {
- create_err(
- "the trait `DispatchFromDyn` may only be implemented \
- for a coercion between structures",
- )
- .emit();
+ tcx.sess.emit_err(errors::CoerceUnsizedMay { span, trait_name: "DispatchFromDyn" });
}
}
}
@@ -351,17 +337,13 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
if def_a != def_b {
let source_path = tcx.def_path_str(def_a.did());
let target_path = tcx.def_path_str(def_b.did());
- struct_span_err!(
- tcx.sess,
+ tcx.sess.emit_err(errors::DispatchFromDynSame {
span,
- E0377,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with the same \
- definition; expected `{}`, found `{}`",
+ trait_name: "CoerceUnsized",
+ note: true,
source_path,
- target_path
- )
- .emit();
+ target_path,
+ });
return err_info;
}
@@ -437,15 +419,11 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
.collect::<Vec<_>>();
if diff_fields.is_empty() {
- struct_span_err!(
- tcx.sess,
+ tcx.sess.emit_err(errors::CoerceUnsizedOneField {
span,
- E0374,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures with one field \
- being coerced, none found"
- )
- .emit();
+ trait_name: "CoerceUnsized",
+ note: true,
+ });
return err_info;
} else if diff_fields.len() > 1 {
let item = tcx.hir().expect_item(impl_did);
@@ -455,29 +433,17 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
tcx.def_span(impl_did)
};
- struct_span_err!(
- tcx.sess,
+ tcx.sess.emit_err(errors::CoerceUnsizedMulti {
span,
- E0375,
- "implementing the trait \
- `CoerceUnsized` requires multiple \
- coercions"
- )
- .note(
- "`CoerceUnsized` may only be implemented for \
- a coercion between structures with one field being coerced",
- )
- .note(format!(
- "currently, {} fields need coercions: {}",
- diff_fields.len(),
- diff_fields
+ coercions_note: true,
+ number: diff_fields.len(),
+ coercions: diff_fields
.iter()
- .map(|&(i, a, b)| { format!("`{}` (`{}` to `{}`)", fields[i].name, a, b) })
+ .map(|&(i, a, b)| format!("`{}` (`{}` to `{}`)", fields[i].name, a, b))
.collect::<Vec<_>>()
- .join(", ")
- ))
- .span_label(span, "requires multiple coercions")
- .emit();
+ .join(", "),
+ });
+
return err_info;
}
@@ -487,14 +453,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe
}
_ => {
- struct_span_err!(
- tcx.sess,
- span,
- E0376,
- "the trait `CoerceUnsized` may only be implemented \
- for a coercion between structures"
- )
- .emit();
+ tcx.sess.emit_err(errors::DispatchFromDynStruct { span, trait_name: "CoerceUnsized" });
return err_info;
}
};
@@ -532,13 +491,6 @@ fn infringing_fields_error(
let trait_name = tcx.def_path_str(trait_did);
- let mut err = struct_span_err!(
- tcx.sess,
- impl_span,
- E0204,
- "the trait `{trait_name}` cannot be implemented for this type"
- );
-
// We'll try to suggest constraining type parameters to fulfill the requirements of
// their `Copy` implementation.
let mut errors: BTreeMap<_, Vec<_>> = Default::default();
@@ -546,14 +498,15 @@ fn infringing_fields_error(
let mut seen_tys = FxHashSet::default();
+ let mut label_spans = Vec::new();
+
for (field, ty, reason) in fields {
// Only report an error once per type.
if !seen_tys.insert(ty) {
continue;
}
- let field_span = tcx.def_span(field.did);
- err.span_label(field_span, format!("this field does not implement `{trait_name}`"));
+ label_spans.push(tcx.def_span(field.did));
match reason {
InfringingFieldsReason::Fulfill(fulfillment_errors) => {
@@ -617,13 +570,24 @@ fn infringing_fields_error(
}
}
}
+ let mut notes = Vec::new();
for ((ty, error_predicate), spans) in errors {
let span: MultiSpan = spans.into();
- err.span_note(
+ notes.push(errors::ImplForTyRequires {
span,
- format!("the `{trait_name}` impl for `{ty}` requires that `{error_predicate}`"),
- );
+ error_predicate,
+ trait_name: trait_name.clone(),
+ ty,
+ });
}
+
+ let mut err = tcx.sess.create_err(errors::TraitCannotImplForTy {
+ span: impl_span,
+ trait_name,
+ label_spans,
+ notes,
+ });
+
suggest_constraining_type_params(
tcx,
tcx.hir().get_generics(impl_did).expect("impls always have generics"),
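
The `infringing_fields_error` rewrite keeps the shape of the old hand-built diagnostic (primary span, one label per offending field, one note per unmet predicate) but routes it through the `errors::TraitCannotImplForTy` struct. The user-facing output is meant to be unchanged; for reference, code that triggers it (this intentionally does not compile):

#[derive(Clone)]
struct NotCopy(String);

// error[E0204]: the trait `Copy` cannot be implemented for this type
#[derive(Clone, Copy)]
struct Wrapper {
    inner: NotCopy, // label: this field does not implement `Copy`
}

fn main() {}
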
diff --git a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
index a94c75f91..0042d683b 100644
--- a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs
@@ -7,7 +7,6 @@
//! `tcx.inherent_impls(def_id)`). That value, however,
//! is computed by selecting an idea from this table.
-use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
@@ -15,6 +14,8 @@ use rustc_middle::ty::fast_reject::{simplify_type, SimplifiedType, TreatParams};
use rustc_middle::ty::{self, CrateInherentImpls, Ty, TyCtxt};
use rustc_span::symbol::sym;
+use crate::errors;
+
/// On-demand query: yields a map containing all types mapped to their inherent impls.
pub fn crate_inherent_impls(tcx: TyCtxt<'_>, (): ()) -> CrateInherentImpls {
let mut collect = InherentCollect { tcx, impls_map: Default::default() };
@@ -45,14 +46,6 @@ struct InherentCollect<'tcx> {
impls_map: CrateInherentImpls,
}
-const INTO_CORE: &str = "consider moving this inherent impl into `core` if possible";
-const INTO_DEFINING_CRATE: &str =
- "consider moving this inherent impl into the crate defining the type if possible";
-const ADD_ATTR_TO_TY: &str = "alternatively add `#[rustc_has_incoherent_inherent_impls]` to the type \
- and `#[rustc_allow_incoherent_impl]` to the relevant impl items";
-const ADD_ATTR: &str =
- "alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items";
-
impl<'tcx> InherentCollect<'tcx> {
fn check_def_id(&mut self, impl_def_id: LocalDefId, self_ty: Ty<'tcx>, ty_def_id: DefId) {
if let Some(ty_def_id) = ty_def_id.as_local() {
@@ -69,30 +62,17 @@ impl<'tcx> InherentCollect<'tcx> {
if !self.tcx.has_attr(ty_def_id, sym::rustc_has_incoherent_inherent_impls) {
let impl_span = self.tcx.def_span(impl_def_id);
- struct_span_err!(
- self.tcx.sess,
- impl_span,
- E0390,
- "cannot define inherent `impl` for a type outside of the crate where the type is defined",
- )
- .help(INTO_DEFINING_CRATE)
- .span_help(impl_span, ADD_ATTR_TO_TY)
- .emit();
+ self.tcx.sess.emit_err(errors::InherentTyOutside { span: impl_span });
return;
}
for &impl_item in items {
if !self.tcx.has_attr(impl_item, sym::rustc_allow_incoherent_impl) {
let impl_span = self.tcx.def_span(impl_def_id);
- struct_span_err!(
- self.tcx.sess,
- impl_span,
- E0390,
- "cannot define inherent `impl` for a type outside of the crate where the type is defined",
- )
- .help(INTO_DEFINING_CRATE)
- .span_help(self.tcx.def_span(impl_item), ADD_ATTR)
- .emit();
+ self.tcx.sess.emit_err(errors::InherentTyOutsideRelevant {
+ span: impl_span,
+ help_span: self.tcx.def_span(impl_item),
+ });
return;
}
}
@@ -104,16 +84,7 @@ impl<'tcx> InherentCollect<'tcx> {
}
} else {
let impl_span = self.tcx.def_span(impl_def_id);
- struct_span_err!(
- self.tcx.sess,
- impl_span,
- E0116,
- "cannot define inherent `impl` for a type outside of the crate \
- where the type is defined"
- )
- .span_label(impl_span, "impl for type defined outside of crate.")
- .note("define and implement a trait or new type instead")
- .emit();
+ self.tcx.sess.emit_err(errors::InherentTyOutsideNew { span: impl_span });
}
}
@@ -124,34 +95,20 @@ impl<'tcx> InherentCollect<'tcx> {
for &impl_item in items {
if !self.tcx.has_attr(impl_item, sym::rustc_allow_incoherent_impl) {
let span = self.tcx.def_span(impl_def_id);
- struct_span_err!(
- self.tcx.sess,
+ self.tcx.sess.emit_err(errors::InherentTyOutsidePrimitive {
span,
- E0390,
- "cannot define inherent `impl` for primitive types outside of `core`",
- )
- .help(INTO_CORE)
- .span_help(self.tcx.def_span(impl_item), ADD_ATTR)
- .emit();
+ help_span: self.tcx.def_span(impl_item),
+ });
return;
}
}
} else {
let span = self.tcx.def_span(impl_def_id);
- let mut err = struct_span_err!(
- self.tcx.sess,
- span,
- E0390,
- "cannot define inherent `impl` for primitive types",
- );
- err.help("consider using an extension trait instead");
+ let mut note = None;
if let ty::Ref(_, subty, _) = ty.kind() {
- err.note(format!(
- "you could also try moving the reference to \
- uses of `{subty}` (such as `self`) within the implementation"
- ));
+ note = Some(errors::InherentPrimitiveTyNote { subty: *subty });
}
- err.emit();
+ self.tcx.sess.emit_err(errors::InherentPrimitiveTy { span, note });
return;
}
}
@@ -178,15 +135,7 @@ impl<'tcx> InherentCollect<'tcx> {
self.check_def_id(id, self_ty, data.principal_def_id().unwrap());
}
ty::Dynamic(..) => {
- struct_span_err!(
- self.tcx.sess,
- item_span,
- E0785,
- "cannot define inherent `impl` for a dyn auto trait"
- )
- .span_label(item_span, "impl requires at least one non-auto trait")
- .note("define and implement a new trait or type instead")
- .emit();
+ self.tcx.sess.emit_err(errors::InherentDyn { span: item_span });
}
ty::Bool
| ty::Char
@@ -202,23 +151,12 @@ impl<'tcx> InherentCollect<'tcx> {
| ty::FnPtr(_)
| ty::Tuple(..) => self.check_primitive_impl(id, self_ty),
ty::Alias(..) | ty::Param(_) => {
- let mut err = struct_span_err!(
- self.tcx.sess,
- item_span,
- E0118,
- "no nominal type found for inherent implementation"
- );
-
- err.span_label(item_span, "impl requires a nominal type")
- .note("either implement a trait on it or create a newtype to wrap it instead");
-
- err.emit();
+ self.tcx.sess.emit_err(errors::InherentNominal { span: item_span });
}
ty::FnDef(..)
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Bound(..)
| ty::Placeholder(_)
| ty::Infer(_) => {
diff --git a/compiler/rustc_hir_analysis/src/coherence/orphan.rs b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
index bbdb108c5..69020b1f1 100644
--- a/compiler/rustc_hir_analysis/src/coherence/orphan.rs
+++ b/compiler/rustc_hir_analysis/src/coherence/orphan.rs
@@ -245,7 +245,6 @@ fn do_orphan_check_impl<'tcx>(
ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(..) => {
diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs
index 7b9f61d7a..221df4e36 100644
--- a/compiler/rustc_hir_analysis/src/collect.rs
+++ b/compiler/rustc_hir_analysis/src/collect.rs
@@ -38,6 +38,7 @@ use rustc_trait_selection::infer::InferCtxtExt;
use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName;
use rustc_trait_selection::traits::ObligationCtxt;
use std::iter;
+use std::ops::Bound;
mod generics_of;
mod item_bounds;
@@ -56,6 +57,8 @@ pub fn provide(providers: &mut Providers) {
resolve_bound_vars::provide(providers);
*providers = Providers {
type_of: type_of::type_of,
+ type_of_opaque: type_of::type_of_opaque,
+ type_alias_is_lazy: type_of::type_alias_is_lazy,
item_bounds: item_bounds::item_bounds,
explicit_item_bounds: item_bounds::explicit_item_bounds,
generics_of: generics_of::generics_of,
@@ -1143,15 +1146,15 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<ty::PolyFnSig<
}
Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor().is_some() => {
- let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id)).instantiate_identity();
+ let adt_def_id = tcx.hir().get_parent_item(hir_id).def_id.to_def_id();
+ let ty = tcx.type_of(adt_def_id).instantiate_identity();
let inputs = data.fields().iter().map(|f| tcx.type_of(f.def_id).instantiate_identity());
- ty::Binder::dummy(tcx.mk_fn_sig(
- inputs,
- ty,
- false,
- hir::Unsafety::Normal,
- abi::Abi::Rust,
- ))
+ // constructors for structs with `layout_scalar_valid_range` are unsafe to call
+ let safety = match tcx.layout_scalar_valid_range(adt_def_id) {
+ (Bound::Unbounded, Bound::Unbounded) => hir::Unsafety::Normal,
+ _ => hir::Unsafety::Unsafe,
+ };
+ ty::Binder::dummy(tcx.mk_fn_sig(inputs, ty, false, safety, abi::Abi::Rust))
}
Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
@@ -1371,7 +1374,7 @@ fn impl_trait_ref(
// make astconv happy.
let mut path_segments = ast_trait_ref.path.segments.to_vec();
let last_segment = path_segments.len() - 1;
- let mut args = path_segments[last_segment].args().clone();
+ let mut args = *path_segments[last_segment].args();
let last_arg = args.args.len() - 1;
assert!(matches!(args.args[last_arg], hir::GenericArg::Const(anon_const) if tcx.has_attr(anon_const.value.def_id, sym::rustc_host)));
args.args = &args.args[..args.args.len() - 1];
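
The `collect.rs` change marks the generated tuple-struct constructor as `unsafe` whenever the ADT restricts its scalar valid range. A hedged sketch using the internal attribute (nightly, `rustc_attrs`); ordinary code never writes this itself -- it mainly affects types like `NonZeroU8` inside `core`:

#![feature(rustc_attrs)]

#[rustc_layout_scalar_valid_range_start(1)]
struct NonZeroByte(u8);

fn make(b: u8) -> Option<NonZeroByte> {
    if b == 0 {
        None
    } else {
        // With the change above, `NonZeroByte` used as a constructor is an
        // `unsafe fn(u8) -> NonZeroByte`, so the call needs an unsafe block
        // (constructing such a type already required one).
        Some(unsafe { NonZeroByte(b) })
    }
}

fn main() {
    assert!(make(0).is_none());
    assert!(make(7).is_some());
}
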
diff --git a/compiler/rustc_hir_analysis/src/collect/generics_of.rs b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
index 484200827..3d60c57b9 100644
--- a/compiler/rustc_hir_analysis/src/collect/generics_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/generics_of.rs
@@ -328,7 +328,10 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics {
name: param.name.ident().name,
def_id: param.def_id.to_def_id(),
pure_wrt_drop: param.pure_wrt_drop,
- kind: ty::GenericParamDefKind::Const { has_default: default.is_some() },
+ kind: ty::GenericParamDefKind::Const {
+ has_default: default.is_some(),
+ is_host_effect: is_host_param,
+ },
})
}
}));
diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
index 495e66366..1298c0860 100644
--- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs
@@ -162,8 +162,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
};
let generics = tcx.generics_of(def_id);
- let parent_count = generics.parent_count as u32;
- let has_own_self = generics.has_self && parent_count == 0;
// Below we'll consider the bounds on the type parameters (including `Self`)
// and the explicit where-clauses, but to get the full set of predicates
@@ -189,17 +187,6 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
predicates.insert((trait_ref.to_predicate(tcx), tcx.def_span(def_id)));
}
- // Collect the region predicates that were declared inline as
- // well. In the case of parameters declared on a fn or method, we
- // have to be careful to only iterate over early-bound regions.
- let mut index = parent_count
- + has_own_self as u32
- + super::early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32;
-
- trace!(?predicates);
- trace!(?ast_generics);
- trace!(?generics);
-
// Collect the predicates that were written inline by the user on each
// type parameter (e.g., `<T: Foo>`). Also add `ConstArgHasType` predicates
// for each const parameter.
@@ -208,10 +195,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
// We already dealt with early bound lifetimes above.
GenericParamKind::Lifetime { .. } => (),
GenericParamKind::Type { .. } => {
- let name = param.name.ident().name;
- let param_ty = ty::ParamTy::new(index, name).to_ty(tcx);
- index += 1;
-
+ let param_ty = icx.astconv().hir_id_to_bound_ty(param.hir_id);
let mut bounds = Bounds::default();
// Params are implicitly sized unless a `?Sized` bound is found
icx.astconv().add_implicitly_sized(
@@ -225,23 +209,16 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
predicates.extend(bounds.clauses());
trace!(?predicates);
}
- GenericParamKind::Const { .. } => {
- let name = param.name.ident().name;
- let param_const = ty::ParamConst::new(index, name);
-
+ hir::GenericParamKind::Const { .. } => {
let ct_ty = tcx
.type_of(param.def_id.to_def_id())
.no_bound_vars()
.expect("const parameters cannot be generic");
-
- let ct = ty::Const::new_param(tcx, param_const, ct_ty);
-
+ let ct = icx.astconv().hir_id_to_bound_const(param.hir_id, ct_ty);
predicates.insert((
ty::ClauseKind::ConstArgHasType(ct, ct_ty).to_predicate(tcx),
param.span,
));
-
- index += 1;
}
}
}
@@ -252,8 +229,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
match predicate {
hir::WherePredicate::BoundPredicate(bound_pred) => {
let ty = icx.to_ty(bound_pred.bounded_ty);
- let bound_vars = icx.tcx.late_bound_vars(bound_pred.hir_id);
-
+ let bound_vars = tcx.late_bound_vars(bound_pred.hir_id);
// Keep the type around in a dummy predicate, in case of no bounds.
// That way, `where Ty:` is not a complete noop (see #53696) and `Ty`
// is still checked for WF.
@@ -296,7 +272,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen
_ => bug!(),
};
let pred = ty::ClauseKind::RegionOutlives(ty::OutlivesPredicate(r1, r2))
- .to_predicate(icx.tcx);
+ .to_predicate(tcx);
(pred, span)
}))
}
diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
index 6dd0c840d..eb4466449 100644
--- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
+++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs
@@ -158,13 +158,14 @@ enum Scope<'a> {
s: ScopeRef<'a>,
},
- /// Disallows capturing non-lifetime binders from parent scopes.
+ /// Disallows capturing late-bound vars from parent scopes.
///
/// This is necessary for something like `for<T> [(); { /* references T */ }]:`,
/// since we don't do something more correct like replacing any captured
/// late-bound vars with early-bound params in the const's own generics.
- AnonConstBoundary {
+ LateBoundary {
s: ScopeRef<'a>,
+ what: &'static str,
},
Root {
@@ -216,7 +217,9 @@ impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
.field("s", &"..")
.finish(),
Scope::TraitRefBoundary { s: _ } => f.debug_struct("TraitRefBoundary").finish(),
- Scope::AnonConstBoundary { s: _ } => f.debug_struct("AnonConstBoundary").finish(),
+ Scope::LateBoundary { s: _, what } => {
+ f.debug_struct("LateBoundary").field("what", what).finish()
+ }
Scope::Root { opt_parent_item } => {
f.debug_struct("Root").field("opt_parent_item", &opt_parent_item).finish()
}
@@ -318,7 +321,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
break (vec![], BinderScopeType::Normal);
}
- Scope::ObjectLifetimeDefault { s, .. } | Scope::AnonConstBoundary { s } => {
+ Scope::ObjectLifetimeDefault { s, .. } | Scope::LateBoundary { s, .. } => {
scope = s;
}
@@ -697,9 +700,12 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
}) => {
intravisit::walk_ty(self, ty);
- // Elided lifetimes are not allowed in non-return
- // position impl Trait
- let scope = Scope::TraitRefBoundary { s: self.scope };
+ // Elided lifetimes and late-bound lifetimes (from the parent)
+ // are not allowed in non-return position impl Trait
+ let scope = Scope::LateBoundary {
+ s: &Scope::TraitRefBoundary { s: self.scope },
+ what: "type alias impl trait",
+ };
self.with(scope, |this| intravisit::walk_item(this, opaque_ty));
return;
@@ -849,106 +855,87 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
let scope = Scope::TraitRefBoundary { s: self.scope };
self.with(scope, |this| {
- for param in generics.params {
- match param.kind {
- GenericParamKind::Lifetime { .. } => {}
- GenericParamKind::Type { default, .. } => {
- if let Some(ty) = default {
- this.visit_ty(ty);
- }
- }
- GenericParamKind::Const { ty, default } => {
- this.visit_ty(ty);
- if let Some(default) = default {
- this.visit_body(this.tcx.hir().body(default.body));
- }
- }
- }
+ walk_list!(this, visit_generic_param, generics.params);
+ walk_list!(this, visit_where_predicate, generics.predicates);
+ })
+ }
+
+ fn visit_where_predicate(&mut self, predicate: &'tcx hir::WherePredicate<'tcx>) {
+ match predicate {
+ &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ hir_id,
+ bounded_ty,
+ bounds,
+ bound_generic_params,
+ origin,
+ ..
+ }) => {
+ let (bound_vars, binders): (FxIndexMap<LocalDefId, ResolvedArg>, Vec<_>) =
+ bound_generic_params
+ .iter()
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = ResolvedArg::late(late_bound_idx as u32, param);
+ let r = late_arg_as_bound_arg(self.tcx, &pair.1, param);
+ (pair, r)
+ })
+ .unzip();
+ self.record_late_bound_vars(hir_id, binders.clone());
+ // Even if there are no lifetimes defined here, we still wrap it in a binder
+ // scope. If there happens to be a nested poly trait ref (an error), that
+ // will be `Concatenating` anyways, so we don't have to worry about the depth
+ // being wrong.
+ let scope = Scope::Binder {
+ hir_id,
+ bound_vars,
+ s: self.scope,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: Some(origin),
+ };
+ self.with(scope, |this| {
+ walk_list!(this, visit_generic_param, bound_generic_params);
+ this.visit_ty(&bounded_ty);
+ walk_list!(this, visit_param_bound, bounds);
+ })
}
- for predicate in generics.predicates {
- match predicate {
- &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
- hir_id,
- bounded_ty,
- bounds,
- bound_generic_params,
- origin,
- ..
- }) => {
- let (bound_vars, binders): (FxIndexMap<LocalDefId, ResolvedArg>, Vec<_>) =
- bound_generic_params
- .iter()
- .enumerate()
- .map(|(late_bound_idx, param)| {
- let pair = ResolvedArg::late(late_bound_idx as u32, param);
- let r = late_arg_as_bound_arg(this.tcx, &pair.1, param);
- (pair, r)
- })
- .unzip();
- this.record_late_bound_vars(hir_id, binders.clone());
- // Even if there are no lifetimes defined here, we still wrap it in a binder
- // scope. If there happens to be a nested poly trait ref (an error), that
- // will be `Concatenating` anyways, so we don't have to worry about the depth
- // being wrong.
- let scope = Scope::Binder {
- hir_id,
- bound_vars,
- s: this.scope,
- scope_type: BinderScopeType::Normal,
- where_bound_origin: Some(origin),
+ &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ lifetime,
+ bounds,
+ ..
+ }) => {
+ self.visit_lifetime(lifetime);
+ walk_list!(self, visit_param_bound, bounds);
+
+ if lifetime.res != hir::LifetimeName::Static {
+ for bound in bounds {
+ let hir::GenericBound::Outlives(lt) = bound else {
+ continue;
};
- this.with(scope, |this| {
- this.visit_ty(&bounded_ty);
- walk_list!(this, visit_param_bound, bounds);
- })
- }
- &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
- lifetime,
- bounds,
- ..
- }) => {
- this.visit_lifetime(lifetime);
- walk_list!(this, visit_param_bound, bounds);
-
- if lifetime.res != hir::LifetimeName::Static {
- for bound in bounds {
- let hir::GenericBound::Outlives(lt) = bound else {
- continue;
- };
- if lt.res != hir::LifetimeName::Static {
- continue;
- }
- this.insert_lifetime(lt, ResolvedArg::StaticLifetime);
- this.tcx.struct_span_lint_hir(
- lint::builtin::UNUSED_LIFETIMES,
- lifetime.hir_id,
- lifetime.ident.span,
- format!(
- "unnecessary lifetime parameter `{}`",
- lifetime.ident
- ),
- |lint| {
- let help = format!(
- "you can use the `'static` lifetime directly, in place of `{}`",
- lifetime.ident,
- );
- lint.help(help)
- },
- );
- }
+ if lt.res != hir::LifetimeName::Static {
+ continue;
}
- }
- &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
- lhs_ty,
- rhs_ty,
- ..
- }) => {
- this.visit_ty(lhs_ty);
- this.visit_ty(rhs_ty);
+ self.insert_lifetime(lt, ResolvedArg::StaticLifetime);
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_LIFETIMES,
+ lifetime.hir_id,
+ lifetime.ident.span,
+ format!("unnecessary lifetime parameter `{}`", lifetime.ident),
+ |lint| {
+ let help = format!(
+ "you can use the `'static` lifetime directly, in place of `{}`",
+ lifetime.ident,
+ );
+ lint.help(help)
+ },
+ );
}
}
}
- })
+ &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { lhs_ty, rhs_ty, .. }) => {
+ self.visit_ty(lhs_ty);
+ self.visit_ty(rhs_ty);
+ }
+ }
}
fn visit_param_bound(&mut self, bound: &'tcx hir::GenericBound<'tcx>) {
@@ -982,10 +969,37 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> {
}
fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
- self.with(Scope::AnonConstBoundary { s: self.scope }, |this| {
+ self.with(Scope::LateBoundary { s: self.scope, what: "constant" }, |this| {
intravisit::walk_anon_const(this, c);
});
}
+
+ fn visit_generic_param(&mut self, p: &'tcx GenericParam<'tcx>) {
+ match p.kind {
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ self.resolve_type_ref(p.def_id, p.hir_id);
+ }
+ GenericParamKind::Lifetime { .. } => {
+ // No need to resolve lifetime params; we don't use them for things
+ // like implicit `?Sized` or const-param-has-ty predicates.
+ }
+ }
+
+ match p.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { default, .. } => {
+ if let Some(ty) = default {
+ self.visit_ty(ty);
+ }
+ }
+ GenericParamKind::Const { ty, default } => {
+ self.visit_ty(ty);
+ if let Some(default) = default {
+ self.visit_body(self.tcx.hir().body(default.body));
+ }
+ }
+ }
+ }
}
fn object_lifetime_default(tcx: TyCtxt<'_>, param_def_id: LocalDefId) -> ObjectLifetimeDefault {
@@ -1165,6 +1179,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
let mut late_depth = 0;
let mut scope = self.scope;
let mut outermost_body = None;
+ let mut crossed_late_boundary = None;
let result = loop {
match *scope {
Scope::Body { id, s } => {
@@ -1197,7 +1212,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
&& let Some(generics) = self.tcx.hir().get_generics(self.tcx.local_parent(param_id))
&& let Some(param) = generics.params.iter().find(|p| p.def_id == param_id)
&& param.is_elided_lifetime()
- && let hir::IsAsync::NotAsync = self.tcx.asyncness(lifetime_ref.hir_id.owner.def_id)
+ && !self.tcx.asyncness(lifetime_ref.hir_id.owner.def_id).is_async()
&& !self.tcx.features().anonymous_lifetime_in_impl_trait
{
let mut diag = rustc_session::parse::feature_err(
@@ -1249,8 +1264,12 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
- | Scope::TraitRefBoundary { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+
+ Scope::LateBoundary { s, what } => {
+ crossed_late_boundary = Some(what);
scope = s;
}
}
@@ -1259,6 +1278,22 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
if let Some(mut def) = result {
if let ResolvedArg::EarlyBound(..) = def {
// Do not free early-bound regions, only late-bound ones.
+ } else if let ResolvedArg::LateBound(_, _, param_def_id) = def
+ && let Some(what) = crossed_late_boundary
+ {
+ let use_span = lifetime_ref.ident.span;
+ let def_span = self.tcx.def_span(param_def_id);
+ let guar = match self.tcx.def_kind(param_def_id) {
+ DefKind::LifetimeParam => {
+ self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Lifetime {
+ use_span,
+ def_span,
+ what,
+ })
+ }
+ _ => unreachable!(),
+ };
+ def = ResolvedArg::Error(guar);
} else if let Some(body_id) = outermost_body {
let fn_id = self.tcx.hir().body_owner(body_id);
match self.tcx.hir().get(fn_id) {
@@ -1313,7 +1348,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ | Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@@ -1332,7 +1367,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
// search.
let mut late_depth = 0;
let mut scope = self.scope;
- let mut crossed_anon_const = false;
+ let mut crossed_late_boundary = None;
let result = loop {
match *scope {
@@ -1367,28 +1402,32 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
scope = s;
}
- Scope::AnonConstBoundary { s } => {
- crossed_anon_const = true;
+ Scope::LateBoundary { s, what } => {
+ crossed_late_boundary = Some(what);
scope = s;
}
}
};
if let Some(def) = result {
- if let ResolvedArg::LateBound(..) = def && crossed_anon_const {
+ if let ResolvedArg::LateBound(..) = def
+ && let Some(what) = crossed_late_boundary
+ {
let use_span = self.tcx.hir().span(hir_id);
let def_span = self.tcx.def_span(param_def_id);
let guar = match self.tcx.def_kind(param_def_id) {
DefKind::ConstParam => {
- self.tcx.sess.emit_err(errors::CannotCaptureLateBoundInAnonConst::Const {
+ self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Const {
use_span,
def_span,
+ what,
})
}
DefKind::TyParam => {
- self.tcx.sess.emit_err(errors::CannotCaptureLateBoundInAnonConst::Type {
+ self.tcx.sess.emit_err(errors::CannotCaptureLateBound::Type {
use_span,
def_span,
+ what,
})
}
_ => unreachable!(),
@@ -1437,7 +1476,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ | Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@@ -1480,7 +1519,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
DefKind::Struct
| DefKind::Union
| DefKind::Enum
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::Trait,
def_id,
) if depth == 0 => Some(def_id),
@@ -1517,7 +1556,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
| Scope::ObjectLifetimeDefault { s, .. }
| Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ | Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@@ -1822,7 +1861,7 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> {
Scope::Supertrait { s, .. }
| Scope::TraitRefBoundary { s, .. }
- | Scope::AnonConstBoundary { s } => {
+ | Scope::LateBoundary { s, .. } => {
scope = s;
}
}
@@ -1990,7 +2029,7 @@ fn is_late_bound_map(
hir::TyKind::Path(hir::QPath::Resolved(
None,
- hir::Path { res: Res::Def(DefKind::TyAlias { .. }, alias_def), segments, span },
+ hir::Path { res: Res::Def(DefKind::TyAlias, alias_def), segments, span },
)) => {
// See comments on `ConstrainedCollectorPostAstConv` for why this arm does not just consider
// args to be unconstrained.
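Editorial note, not part of the patch: a minimal sketch of the case the reworked `UNUSED_LIFETIMES` arm above reports. A where-clause bound that forces a lifetime parameter to be `'static` makes the parameter redundant; the lint is allow-by-default, so the snippet opts in explicitly.

#![warn(unused_lifetimes)]

// Warns: "unnecessary lifetime parameter `'a`", with the help text built above
// suggesting to use `'static` directly in place of `'a`.
fn promote<'a>(s: &'a str) -> &'static str
where
    'a: 'static,
{
    s
}

fn main() {
    println!("{}", promote("hello"));
}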
diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs
index 2bbdbe3a1..ae62119b1 100644
--- a/compiler/rustc_hir_analysis/src/collect/type_of.rs
+++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs
@@ -1,7 +1,8 @@
use rustc_errors::{Applicability, StashKey};
use rustc_hir as hir;
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::HirId;
+use rustc_middle::query::plumbing::CyclePlaceholder;
use rustc_middle::ty::print::with_forced_trimmed_paths;
use rustc_middle::ty::util::IntTypeExt;
use rustc_middle::ty::{self, ImplTraitInTraitData, IsSuggestable, Ty, TyCtxt, TypeVisitableExt};
@@ -388,86 +389,62 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
}
},
- Node::Item(item) => {
- match item.kind {
- ItemKind::Static(ty, .., body_id) => {
- if is_suggestable_infer_ty(ty) {
- infer_placeholder_type(
- tcx,
- def_id,
- body_id,
- ty.span,
- item.ident,
- "static variable",
- )
- } else {
- icx.to_ty(ty)
- }
- }
- ItemKind::Const(ty, _, body_id) => {
- if is_suggestable_infer_ty(ty) {
- infer_placeholder_type(
- tcx, def_id, body_id, ty.span, item.ident, "constant",
- )
- } else {
- icx.to_ty(ty)
- }
- }
- ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty),
- ItemKind::Impl(hir::Impl { self_ty, .. }) => match self_ty.find_self_aliases() {
- spans if spans.len() > 0 => {
- let guar = tcx.sess.emit_err(crate::errors::SelfInImplSelf {
- span: spans.into(),
- note: (),
- });
- Ty::new_error(tcx, guar)
- }
- _ => icx.to_ty(*self_ty),
- },
- ItemKind::Fn(..) => {
- let args = ty::GenericArgs::identity_for_item(tcx, def_id);
- Ty::new_fn_def(tcx, def_id.to_def_id(), args)
- }
- ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
- let def = tcx.adt_def(def_id);
- let args = ty::GenericArgs::identity_for_item(tcx, def_id);
- Ty::new_adt(tcx, def, args)
+ Node::Item(item) => match item.kind {
+ ItemKind::Static(ty, .., body_id) => {
+ if is_suggestable_infer_ty(ty) {
+ infer_placeholder_type(
+ tcx,
+ def_id,
+ body_id,
+ ty.span,
+ item.ident,
+ "static variable",
+ )
+ } else {
+ icx.to_ty(ty)
}
- ItemKind::OpaqueTy(OpaqueTy {
- origin: hir::OpaqueTyOrigin::TyAlias { .. },
- ..
- }) => opaque::find_opaque_ty_constraints_for_tait(tcx, def_id),
- // Opaque types desugared from `impl Trait`.
- ItemKind::OpaqueTy(&OpaqueTy {
- origin:
- hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner),
- in_trait,
- ..
- }) => {
- if in_trait && !tcx.defaultness(owner).has_value() {
- span_bug!(
- tcx.def_span(def_id),
- "tried to get type of this RPITIT with no definition"
- );
- }
- opaque::find_opaque_ty_constraints_for_rpit(tcx, def_id, owner)
+ }
+ ItemKind::Const(ty, _, body_id) => {
+ if is_suggestable_infer_ty(ty) {
+ infer_placeholder_type(tcx, def_id, body_id, ty.span, item.ident, "constant")
+ } else {
+ icx.to_ty(ty)
}
- ItemKind::Trait(..)
- | ItemKind::TraitAlias(..)
- | ItemKind::Macro(..)
- | ItemKind::Mod(..)
- | ItemKind::ForeignMod { .. }
- | ItemKind::GlobalAsm(..)
- | ItemKind::ExternCrate(..)
- | ItemKind::Use(..) => {
- span_bug!(
- item.span,
- "compute_type_of_item: unexpected item type: {:?}",
- item.kind
- );
+ }
+ ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty),
+ ItemKind::Impl(hir::Impl { self_ty, .. }) => match self_ty.find_self_aliases() {
+ spans if spans.len() > 0 => {
+ let guar = tcx
+ .sess
+ .emit_err(crate::errors::SelfInImplSelf { span: spans.into(), note: () });
+ Ty::new_error(tcx, guar)
}
+ _ => icx.to_ty(*self_ty),
+ },
+ ItemKind::Fn(..) => {
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_fn_def(tcx, def_id.to_def_id(), args)
}
- }
+ ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
+ let def = tcx.adt_def(def_id);
+ let args = ty::GenericArgs::identity_for_item(tcx, def_id);
+ Ty::new_adt(tcx, def, args)
+ }
+ ItemKind::OpaqueTy(..) => tcx.type_of_opaque(def_id).map_or_else(
+ |CyclePlaceholder(guar)| Ty::new_error(tcx, guar),
+ |ty| ty.instantiate_identity(),
+ ),
+ ItemKind::Trait(..)
+ | ItemKind::TraitAlias(..)
+ | ItemKind::Macro(..)
+ | ItemKind::Mod(..)
+ | ItemKind::ForeignMod { .. }
+ | ItemKind::GlobalAsm(..)
+ | ItemKind::ExternCrate(..)
+ | ItemKind::Use(..) => {
+ span_bug!(item.span, "compute_type_of_item: unexpected item type: {:?}", item.kind);
+ }
+ },
Node::ForeignItem(foreign_item) => match foreign_item.kind {
ForeignItemKind::Fn(..) => {
@@ -514,6 +491,51 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<Ty
ty::EarlyBinder::bind(output)
}
+pub(super) fn type_of_opaque(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> {
+ if let Some(def_id) = def_id.as_local() {
+ use rustc_hir::*;
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ Ok(ty::EarlyBinder::bind(match tcx.hir().get(hir_id) {
+ Node::Item(item) => match item.kind {
+ ItemKind::OpaqueTy(OpaqueTy {
+ origin: hir::OpaqueTyOrigin::TyAlias { .. },
+ ..
+ }) => opaque::find_opaque_ty_constraints_for_tait(tcx, def_id),
+ // Opaque types desugared from `impl Trait`.
+ ItemKind::OpaqueTy(&OpaqueTy {
+ origin:
+ hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner),
+ in_trait,
+ ..
+ }) => {
+ if in_trait && !tcx.defaultness(owner).has_value() {
+ span_bug!(
+ tcx.def_span(def_id),
+ "tried to get type of this RPITIT with no definition"
+ );
+ }
+ opaque::find_opaque_ty_constraints_for_rpit(tcx, def_id, owner)
+ }
+ _ => {
+ span_bug!(item.span, "type_of_opaque: unexpected item type: {:?}", item.kind);
+ }
+ },
+
+ x => {
+ bug!("unexpected sort of node in type_of_opaque(): {:?}", x);
+ }
+ }))
+ } else {
+ // Foreign opaque type will go through the foreign provider
+ // and load the type from metadata.
+ Ok(tcx.type_of(def_id))
+ }
+}
+
fn infer_placeholder_type<'a>(
tcx: TyCtxt<'a>,
def_id: LocalDefId,
@@ -601,3 +623,25 @@ fn check_feature_inherent_assoc_ty(tcx: TyCtxt<'_>, span: Span) {
.emit();
}
}
+
+pub fn type_alias_is_lazy<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> bool {
+ use hir::intravisit::Visitor;
+ if tcx.features().lazy_type_alias {
+ return true;
+ }
+ struct HasTait {
+ has_type_alias_impl_trait: bool,
+ }
+ impl<'tcx> Visitor<'tcx> for HasTait {
+ fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
+ if let hir::TyKind::OpaqueDef(..) = t.kind {
+ self.has_type_alias_impl_trait = true;
+ } else {
+ hir::intravisit::walk_ty(self, t);
+ }
+ }
+ }
+ let mut has_tait = HasTait { has_type_alias_impl_trait: false };
+ has_tait.visit_ty(tcx.hir().expect_item(def_id).expect_ty_alias().0);
+ has_tait.has_type_alias_impl_trait
+}
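Editorial note, not part of the patch: a hedged sketch of what the new `type_alias_is_lazy` helper detects. On a nightly toolchain with `type_alias_impl_trait`, an alias whose right-hand side mentions `impl Trait` (a `TyKind::OpaqueDef` after lowering) is treated as lazy, while an ordinary alias such as `type Pair = (u32, u32);` is not.

#![feature(type_alias_impl_trait)]

// The `HasTait` visitor above finds the opaque type on the right-hand side, so this
// alias counts as lazy even without `#![feature(lazy_type_alias)]`.
type Counter = impl Iterator<Item = u32>;

fn make_counter() -> Counter {
    0..10u32
}

fn main() {
    assert_eq!(make_counter().sum::<u32>(), 45);
}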
diff --git a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs
index 957a6bb34..0544c5ca8 100644
--- a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs
+++ b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs
@@ -1,7 +1,7 @@
use rustc_errors::StashKey;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{self as hir, Expr, ImplItem, Item, Node, TraitItem};
+use rustc_hir::{self as hir, def, Expr, ImplItem, Item, Node, TraitItem};
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::DUMMY_SP;
@@ -74,9 +74,14 @@ pub(super) fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: Local
hidden.ty
} else {
+ let mut parent_def_id = def_id;
+ while tcx.def_kind(parent_def_id) == def::DefKind::OpaqueTy {
+ // Account for `type Alias = impl Trait<Foo = impl Trait>;` (#116031)
+ parent_def_id = tcx.local_parent(parent_def_id);
+ }
let reported = tcx.sess.emit_err(UnconstrainedOpaqueType {
span: tcx.def_span(def_id),
- name: tcx.item_name(tcx.local_parent(def_id).to_def_id()),
+ name: tcx.item_name(parent_def_id.to_def_id()),
what: match tcx.hir().get(scope) {
_ if scope == hir::CRATE_HIR_ID => "module",
Node::Item(hir::Item { kind: hir::ItemKind::Mod(_), .. }) => "module",
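Editorial note, not part of the patch: a rough illustration of the #116031 case the parent-walking loop above accounts for. The inner `impl Sized` is an opaque type nested directly inside another opaque type, so naming the immediate parent in the "unconstrained opaque type" error would point at an opaque rather than at the alias.

#![feature(type_alias_impl_trait)]

// Both opaque types here are unconstrained; with the fix, the diagnostic for the
// inner one names the enclosing alias `Alias` instead of its opaque parent.
type Alias = impl Iterator<Item = impl Sized>;

fn main() {}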
diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs
index 9471ad9ca..0efe82b20 100644
--- a/compiler/rustc_hir_analysis/src/errors.rs
+++ b/compiler/rustc_hir_analysis/src/errors.rs
@@ -430,20 +430,30 @@ pub(crate) struct VariadicFunctionCompatibleConvention<'a> {
}
#[derive(Diagnostic)]
-pub(crate) enum CannotCaptureLateBoundInAnonConst {
- #[diag(hir_analysis_cannot_capture_late_bound_ty_in_anon_const)]
+pub(crate) enum CannotCaptureLateBound {
+ #[diag(hir_analysis_cannot_capture_late_bound_ty)]
Type {
#[primary_span]
use_span: Span,
#[label]
def_span: Span,
+ what: &'static str,
},
- #[diag(hir_analysis_cannot_capture_late_bound_const_in_anon_const)]
+ #[diag(hir_analysis_cannot_capture_late_bound_const)]
Const {
#[primary_span]
use_span: Span,
#[label]
def_span: Span,
+ what: &'static str,
+ },
+ #[diag(hir_analysis_cannot_capture_late_bound_lifetime)]
+ Lifetime {
+ #[primary_span]
+ use_span: Span,
+ #[label]
+ def_span: Span,
+ what: &'static str,
},
}
@@ -919,6 +929,22 @@ pub struct UnusedAssociatedTypeBounds {
pub span: Span,
}
+#[derive(LintDiagnostic)]
+#[diag(hir_analysis_rpitit_refined)]
+#[note]
+pub(crate) struct ReturnPositionImplTraitInTraitRefined<'tcx> {
+ #[suggestion(applicability = "maybe-incorrect", code = "{pre}{return_ty}{post}")]
+ pub impl_return_span: Span,
+ #[label]
+ pub trait_return_span: Option<Span>,
+ #[label(hir_analysis_unmatched_bound_label)]
+ pub unmatched_bound: Option<Span>,
+
+ pub pre: &'static str,
+ pub post: &'static str,
+ pub return_ty: Ty<'tcx>,
+}
+
#[derive(Diagnostic)]
#[diag(hir_analysis_assoc_bound_on_const)]
#[note]
@@ -927,3 +953,199 @@ pub struct AssocBoundOnConst {
pub span: Span,
pub descr: &'static str,
}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_ty_outside, code = "E0390")]
+#[help]
+pub struct InherentTyOutside {
+ #[primary_span]
+ #[help(hir_analysis_span_help)]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0378")]
+pub struct DispatchFromDynCoercion<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+ #[note(hir_analysis_coercion_between_struct_same_note)]
+ pub note: bool,
+ pub source_path: String,
+ pub target_path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_dispatch_from_dyn_repr, code = "E0378")]
+pub struct DispatchFromDynRepr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_ty_outside_relevant, code = "E0390")]
+#[help]
+pub struct InherentTyOutsideRelevant {
+ #[primary_span]
+ pub span: Span,
+ #[help(hir_analysis_span_help)]
+ pub help_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_ty_outside_new, code = "E0116")]
+#[note]
+pub struct InherentTyOutsideNew {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_ty_outside_primitive, code = "E0390")]
+#[help]
+pub struct InherentTyOutsidePrimitive {
+ #[primary_span]
+ pub span: Span,
+ #[help(hir_analysis_span_help)]
+ pub help_span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_primitive_ty, code = "E0390")]
+#[help]
+pub struct InherentPrimitiveTy<'a> {
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub note: Option<InherentPrimitiveTyNote<'a>>,
+}
+
+#[derive(Subdiagnostic)]
+#[note(hir_analysis_inherent_primitive_ty_note)]
+pub struct InherentPrimitiveTyNote<'a> {
+ pub subty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_dyn, code = "E0785")]
+#[note]
+pub struct InherentDyn {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_inherent_nominal, code = "E0118")]
+#[note]
+pub struct InherentNominal {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_dispatch_from_dyn_zst, code = "E0378")]
+#[note]
+pub struct DispatchFromDynZST<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+ pub ty: Ty<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0378")]
+pub struct DispatchFromDynSingle<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+ #[note(hir_analysis_coercion_between_struct_single_note)]
+ pub note: bool,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_dispatch_from_dyn_multi, code = "E0378")]
+#[note]
+pub struct DispatchFromDynMulti {
+ #[primary_span]
+ pub span: Span,
+ #[note(hir_analysis_coercions_note)]
+ pub coercions_note: bool,
+ pub number: usize,
+ pub coercions: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0376")]
+pub struct DispatchFromDynStruct<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0377")]
+pub struct DispatchFromDynSame<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+ #[note(hir_analysis_coercion_between_struct_same_note)]
+ pub note: bool,
+ pub source_path: String,
+ pub target_path: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0374")]
+pub struct CoerceUnsizedOneField<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+ #[note(hir_analysis_coercion_between_struct_single_note)]
+ pub note: bool,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_multi, code = "E0375")]
+#[note]
+pub struct CoerceUnsizedMulti {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[note(hir_analysis_coercions_note)]
+ pub coercions_note: bool,
+ pub number: usize,
+ pub coercions: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_coerce_unsized_may, code = "E0378")]
+pub struct CoerceUnsizedMay<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_analysis_trait_cannot_impl_for_ty, code = "E0204")]
+pub struct TraitCannotImplForTy {
+ #[primary_span]
+ pub span: Span,
+ pub trait_name: String,
+ #[label]
+ pub label_spans: Vec<Span>,
+ #[subdiagnostic]
+ pub notes: Vec<ImplForTyRequires>,
+}
+
+#[derive(Subdiagnostic)]
+#[note(hir_analysis_requires_note)]
+pub struct ImplForTyRequires {
+ #[primary_span]
+ pub span: MultiSpan,
+ pub error_predicate: String,
+ pub trait_name: String,
+ pub ty: String,
+}
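Editorial note, not part of the patch: several of the structs added above are ports of long-standing coherence errors to the diagnostic-struct style. As a hedged example, the E0116 case covered by `InherentTyOutsideNew` corresponds to an inherent impl on a foreign type:

// Rejected with E0116: cannot define inherent `impl` for a type outside of the
// crate where the type is defined (`Vec` comes from `alloc`).
impl Vec<u8> {
    fn first_byte(&self) -> Option<u8> {
        self.first().copied()
    }
}

fn main() {}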
diff --git a/compiler/rustc_hir_analysis/src/lib.rs b/compiler/rustc_hir_analysis/src/lib.rs
index 4f95174f8..03963925d 100644
--- a/compiler/rustc_hir_analysis/src/lib.rs
+++ b/compiler/rustc_hir_analysis/src/lib.rs
@@ -99,7 +99,6 @@ use rustc_errors::ErrorGuaranteed;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_hir as hir;
-use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::middle;
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -107,8 +106,7 @@ use rustc_middle::util;
use rustc_session::parse::feature_err;
use rustc_span::{symbol::sym, Span, DUMMY_SP};
use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt as _;
-use rustc_trait_selection::traits::{self, ObligationCause, ObligationCtxt};
+use rustc_trait_selection::traits;
use astconv::{AstConv, OnlySelfBounds};
use bounds::Bounds;
@@ -117,7 +115,7 @@ use rustc_hir::def::DefKind;
fluent_messages! { "../messages.ftl" }
fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) {
- const CONVENTIONS_UNSTABLE: &str = "`C`, `cdecl`, `win64`, `sysv64` or `efiapi`";
+ const CONVENTIONS_UNSTABLE: &str = "`C`, `cdecl`, `aapcs`, `win64`, `sysv64` or `efiapi`";
const CONVENTIONS_STABLE: &str = "`C` or `cdecl`";
const UNSTABLE_EXPLAIN: &str =
"using calling conventions other than `C` or `cdecl` for varargs functions is unstable";
@@ -151,28 +149,6 @@ fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi
tcx.sess.emit_err(errors::VariadicFunctionCompatibleConvention { span, conventions });
}
-fn require_same_types<'tcx>(
- tcx: TyCtxt<'tcx>,
- cause: &ObligationCause<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- expected: Ty<'tcx>,
- actual: Ty<'tcx>,
-) {
- let infcx = &tcx.infer_ctxt().build();
- let ocx = ObligationCtxt::new(infcx);
- match ocx.eq(cause, param_env, expected, actual) {
- Ok(()) => {
- let errors = ocx.select_all_or_error();
- if !errors.is_empty() {
- infcx.err_ctxt().report_fulfillment_errors(&errors);
- }
- }
- Err(err) => {
- infcx.err_ctxt().report_mismatched_types(cause, expected, actual, err).emit();
- }
- }
-}
-
pub fn provide(providers: &mut Providers) {
collect::provide(providers);
coherence::provide(providers);
@@ -237,6 +213,10 @@ pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module))
});
+ // Freeze definitions as we don't add new ones at this point. This improves performance by
+ // allowing lock-free access to them.
+ tcx.untracked().definitions.freeze();
+
// FIXME: Remove this when we implement creating `DefId`s
// for anon constants during their parents' typeck.
// Typeck all body owners in parallel will produce queries
diff --git a/compiler/rustc_hir_analysis/src/variance/constraints.rs b/compiler/rustc_hir_analysis/src/variance/constraints.rs
index 8a40509d7..61d9c989e 100644
--- a/compiler/rustc_hir_analysis/src/variance/constraints.rs
+++ b/compiler/rustc_hir_analysis/src/variance/constraints.rs
@@ -6,7 +6,7 @@
use hir::def_id::{DefId, LocalDefId};
use rustc_hir as hir;
use rustc_hir::def::DefKind;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{GenericArgKind, GenericArgsRef};
use super::terms::VarianceTerm::*;
@@ -78,9 +78,7 @@ pub fn add_constraints_from_crate<'a, 'tcx>(
}
}
DefKind::Fn | DefKind::AssocFn => constraint_cx.build_constraints_for_item(def_id),
- DefKind::TyAlias { lazy }
- if lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types() =>
- {
+ DefKind::TyAlias if tcx.type_alias_is_lazy(def_id) => {
constraint_cx.build_constraints_for_item(def_id)
}
_ => {}
@@ -110,8 +108,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
// The type as returned by `type_of` is the underlying type and generally not a weak projection.
// Therefore we need to check the `DefKind` first.
- if let DefKind::TyAlias { lazy } = tcx.def_kind(def_id)
- && (lazy || ty.has_opaque_types())
+ if let DefKind::TyAlias = tcx.def_kind(def_id)
+ && tcx.type_alias_is_lazy(def_id)
{
self.add_constraints_from_ty(current_item, ty, self.covariant);
return;
@@ -314,11 +312,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
// types, where we use Error as the Self type
}
- ty::Placeholder(..)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
- | ty::Bound(..)
- | ty::Infer(..) => {
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Bound(..) | ty::Infer(..) => {
bug!("unexpected type encountered in variance inference: {}", ty);
}
}
diff --git a/compiler/rustc_hir_analysis/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs
index d91d9fcbc..85e0000ab 100644
--- a/compiler/rustc_hir_analysis/src/variance/mod.rs
+++ b/compiler/rustc_hir_analysis/src/variance/mod.rs
@@ -8,7 +8,7 @@ use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, CrateVariancesMap, GenericArgsRef, Ty, TyCtxt};
-use rustc_middle::ty::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt};
+use rustc_middle::ty::{TypeSuperVisitable, TypeVisitable};
use std::ops::ControlFlow;
/// Defines the `TermsContext` basically houses an arena where we can
@@ -56,9 +56,7 @@ fn variances_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Variance] {
let crate_map = tcx.crate_variances(());
return crate_map.variances.get(&item_def_id.to_def_id()).copied().unwrap_or(&[]);
}
- DefKind::TyAlias { lazy }
- if lazy || tcx.type_of(item_def_id).instantiate_identity().has_opaque_types() =>
- {
+ DefKind::TyAlias if tcx.type_alias_is_lazy(item_def_id) => {
// These are inferred.
let crate_map = tcx.crate_variances(());
return crate_map.variances.get(&item_def_id.to_def_id()).copied().unwrap_or(&[]);
@@ -129,7 +127,15 @@ fn variance_of_opaque(tcx: TyCtxt<'_>, item_def_id: LocalDefId) -> &[ty::Varianc
// By default, RPIT are invariant wrt type and const generics, but they are bivariant wrt
// lifetime generics.
- let mut variances: Vec<_> = std::iter::repeat(ty::Invariant).take(generics.count()).collect();
+ let variances = std::iter::repeat(ty::Invariant).take(generics.count());
+
+ let mut variances: Vec<_> = match tcx.opaque_type_origin(item_def_id) {
+ rustc_hir::OpaqueTyOrigin::FnReturn(_) | rustc_hir::OpaqueTyOrigin::AsyncFn(_) => {
+ variances.collect()
+ }
+ // But TAIT are invariant for all generics
+ rustc_hir::OpaqueTyOrigin::TyAlias { .. } => return tcx.arena.alloc_from_iter(variances),
+ };
// Mark all lifetimes from parent generics as unused (Bivariant).
// This will be overridden later if required.
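Editorial note, not part of the patch: a hedged sketch of the distinction the hunk above introduces. For a return-position `impl Trait`, the parent's lifetime parameters may end up bivariant if the opaque does not use them, whereas a `type ... = impl ...` alias (TAIT) now gets all-invariant variances returned immediately.

// Return-position impl Trait: `'a` is captured by the opaque type here, but an
// unused parent lifetime would be marked Bivariant by the loop that follows.
fn bytes_of<'a>(s: &'a str) -> impl Iterator<Item = u8> + 'a {
    s.bytes()
}

fn main() {
    assert_eq!(bytes_of("hi").count(), 2);
}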
diff --git a/compiler/rustc_hir_analysis/src/variance/terms.rs b/compiler/rustc_hir_analysis/src/variance/terms.rs
index 1a8ec5f08..275df2495 100644
--- a/compiler/rustc_hir_analysis/src/variance/terms.rs
+++ b/compiler/rustc_hir_analysis/src/variance/terms.rs
@@ -12,7 +12,7 @@
use rustc_arena::DroplessArena;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{LocalDefId, LocalDefIdMap};
-use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, TyCtxt};
use std::fmt;
use self::VarianceTerm::*;
@@ -97,9 +97,7 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
}
}
DefKind::Fn | DefKind::AssocFn => terms_cx.add_inferreds_for_item(def_id),
- DefKind::TyAlias { lazy }
- if lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types() =>
- {
+ DefKind::TyAlias if tcx.type_alias_is_lazy(def_id) => {
terms_cx.add_inferreds_for_item(def_id)
}
_ => {}
diff --git a/compiler/rustc_hir_analysis/src/variance/test.rs b/compiler/rustc_hir_analysis/src/variance/test.rs
index d57d05d76..d98dc0e6b 100644
--- a/compiler/rustc_hir_analysis/src/variance/test.rs
+++ b/compiler/rustc_hir_analysis/src/variance/test.rs
@@ -1,9 +1,24 @@
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::CRATE_DEF_ID;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::sym;
use crate::errors;
pub fn test_variance(tcx: TyCtxt<'_>) {
+ if tcx.has_attr(CRATE_DEF_ID, sym::rustc_variance_of_opaques) {
+ for id in tcx.hir().items() {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::OpaqueTy) {
+ let variances_of = tcx.variances_of(id.owner_id);
+
+ tcx.sess.emit_err(errors::VariancesOf {
+ span: tcx.def_span(id.owner_id),
+ variances_of: format!("{variances_of:?}"),
+ });
+ }
+ }
+ }
+
// For unit testing: check for a special "rustc_variance"
// attribute and report an error with various results if found.
for id in tcx.hir().items() {
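Editorial note, not part of the patch: a rough sketch of how the new test hook might be exercised, assuming the usual rustc UI-test conventions for internal attributes. A crate opts in at the crate root, and the inferred variances of every opaque type are then reported through `errors::VariancesOf`.

// Nightly-only, internal testing attribute; the exact output format is whatever
// `errors::VariancesOf` renders for each opaque type in the crate.
#![feature(rustc_attrs)]
#![rustc_variance_of_opaques]

fn captures<'a>(x: &'a u32) -> impl Sized + 'a {
    x
}

fn main() {}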
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
index 89efdc269..8587b009f 100644
--- a/compiler/rustc_hir_pretty/src/lib.rs
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -2304,7 +2304,7 @@ impl<'a> State<'a> {
match header.asyncness {
hir::IsAsync::NotAsync => {}
- hir::IsAsync::Async => self.word_nbsp("async"),
+ hir::IsAsync::Async(_) => self.word_nbsp("async"),
}
self.print_unsafety(header.unsafety);
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl
index 2281343e2..9950a2263 100644
--- a/compiler/rustc_hir_typeck/messages.ftl
+++ b/compiler/rustc_hir_typeck/messages.ftl
@@ -16,6 +16,21 @@ hir_typeck_candidate_trait_note = `{$trait_name}` defines an item `{$item_name}`
*[other] , perhaps you need to restrict type parameter `{$action_or_ty}` with it
}
+hir_typeck_cannot_cast_to_bool = cannot cast `{$expr_ty}` as `bool`
+ .suggestion = compare with zero instead
+ .help = compare with zero instead
+ .label = unsupported cast
+
+hir_typeck_cast_enum_drop = cannot cast enum `{$expr_ty}` into integer `{$cast_ty}` because it implements `Drop`
+
+hir_typeck_cast_unknown_pointer = cannot cast {$to ->
+ [true] to
+ *[false] from
+ } a pointer of an unknown kind
+ .label_to = needs more type information
+ .note = the type information given here is insufficient to check whether the pointer cast is valid
+ .label_from = the type information given here is insufficient to check whether the pointer cast is valid
+
hir_typeck_const_select_must_be_const = this argument must be a `const fn`
.help = consult the documentation on `const_eval_select` for more information
@@ -29,10 +44,16 @@ hir_typeck_convert_using_method = try using `{$sugg}` to convert `{$found}` to `
hir_typeck_ctor_is_private = tuple struct constructor `{$def}` is private
+hir_typeck_deref_is_empty = this expression `Deref`s to `{$deref_ty}` which implements `is_empty`
+
hir_typeck_expected_default_return_type = expected `()` because of default return type
hir_typeck_expected_return_type = expected `{$expected}` because of return type
+hir_typeck_explicit_destructor = explicit use of destructor method
+ .label = explicit destructor calls not allowed
+ .suggestion = consider using `drop` function
+
hir_typeck_field_multiply_specified_in_initializer =
field `{$ident}` specified more than once
.label = used more than once
@@ -52,20 +73,32 @@ hir_typeck_functional_record_update_on_non_struct =
hir_typeck_help_set_edition_cargo = set `edition = "{$edition}"` in `Cargo.toml`
hir_typeck_help_set_edition_standalone = pass `--edition {$edition}` to `rustc`
-hir_typeck_lang_start_expected_sig_note = the `start` lang item should have the signature `fn(fn() -> T, isize, *const *const u8, u8) -> isize`
-hir_typeck_lang_start_incorrect_number_params = incorrect number of parameters for the `start` lang item
-hir_typeck_lang_start_incorrect_number_params_note_expected_count = the `start` lang item should have four parameters, but found {$found_param_count}
+hir_typeck_int_to_fat = cannot cast `{$expr_ty}` to a pointer that {$known_wide ->
+ [true] is
+ *[false] may be
+ } wide
+hir_typeck_int_to_fat_label = creating a `{$cast_ty}` requires both an address and {$metadata}
+hir_typeck_int_to_fat_label_nightly = consider casting this expression to `*const ()`, then using `core::ptr::from_raw_parts`
+
+hir_typeck_invalid_callee = expected function, found {$ty}
-hir_typeck_lang_start_incorrect_param = parameter {$param_num} of the `start` lang item is incorrect
- .suggestion = change the type from `{$found_ty}` to `{$expected_ty}`
+hir_typeck_lossy_provenance_int2ptr =
+ strict provenance disallows casting integer `{$expr_ty}` to pointer `{$cast_ty}`
+ .suggestion = use `.with_addr()` to adjust a valid pointer in the same allocation, to this address
+ .help = if you can't comply with strict provenance and don't have a pointer with the correct provenance you can use `std::ptr::from_exposed_addr()` instead
-hir_typeck_lang_start_incorrect_ret_ty = the return type of the `start` lang item is incorrect
- .suggestion = change the type from `{$found_ty}` to `{$expected_ty}`
+hir_typeck_lossy_provenance_ptr2int =
+ under strict provenance it is considered bad style to cast pointer `{$expr_ty}` to integer `{$cast_ty}`
+ .suggestion = use `.addr()` to obtain the address of a pointer
+ .help = if you can't comply with strict provenance and need to expose the pointer provenance you can use `.expose_addr()` instead
hir_typeck_method_call_on_unknown_raw_pointee =
cannot call a method on a raw pointer with an unknown pointee type
+hir_typeck_missing_fn_lang_items = failed to find an overloaded call trait for closure call
+ .help = make sure the `fn`/`fn_mut`/`fn_once` lang items are defined and have correctly defined `call`/`call_mut`/`call_once` methods
+
hir_typeck_missing_parentheses_in_range = can't call method `{$method_name}` on type `{$ty_str}`
hir_typeck_no_associated_item = no {$item_kind} named `{$item_name}` found for {$ty_prefix} `{$ty_str}`{$trait_missing_method ->
@@ -81,11 +114,20 @@ hir_typeck_option_result_asref = use `{$def_path}::as_ref` to convert `{$expecte
hir_typeck_option_result_cloned = use `{$def_path}::cloned` to clone the value inside the `{$def_path}`
hir_typeck_option_result_copied = use `{$def_path}::copied` to copy the value inside the `{$def_path}`
+hir_typeck_remove_semi_for_coerce = you might have meant to return the `match` expression
+hir_typeck_remove_semi_for_coerce_expr = this could be implicitly returned but it is a statement, not a tail expression
+hir_typeck_remove_semi_for_coerce_ret = the `match` arms can conform to this return type
+hir_typeck_remove_semi_for_coerce_semi = the `match` is a statement because of this semicolon, consider removing it
+hir_typeck_remove_semi_for_coerce_suggestion = remove this semicolon
+
hir_typeck_return_stmt_outside_of_fn_body =
{$statement_kind} statement outside of function body
.encl_body_label = the {$statement_kind} is part of this body...
.encl_fn_label = ...not the enclosing function body
+hir_typeck_rustcall_incorrect_args =
+ functions with the "rust-call" ABI must take a single non-self tuple argument
+
hir_typeck_struct_expr_non_exhaustive =
cannot create non-exhaustive {$what} using struct expression
@@ -95,8 +137,18 @@ hir_typeck_suggest_boxing_when_appropriate = store this in the heap by calling `
hir_typeck_suggest_ptr_null_mut = consider using `core::ptr::null_mut` instead
+hir_typeck_trivial_cast = trivial {$numeric ->
+ [true] numeric cast
+ *[false] cast
+ }: `{$expr_ty}` as `{$cast_ty}`
+ .help = cast can be replaced by coercion; this might require a temporary variable
+
hir_typeck_union_pat_dotdot = `..` cannot be used in union patterns
hir_typeck_union_pat_multiple_fields = union patterns should have exactly one field
+
+hir_typeck_use_is_empty =
+ consider using the `is_empty` method on `{$expr_ty}` to determine if it contains anything
+
hir_typeck_yield_expr_outside_of_generator =
yield expression outside of generator literal
diff --git a/compiler/rustc_hir_typeck/src/_match.rs b/compiler/rustc_hir_typeck/src/_match.rs
index 7ad9f51ba..81fe0cc48 100644
--- a/compiler/rustc_hir_typeck/src/_match.rs
+++ b/compiler/rustc_hir_typeck/src/_match.rs
@@ -1,7 +1,8 @@
use crate::coercion::{AsCoercionSite, CoerceMany};
use crate::{Diverges, Expectation, FnCtxt, Needs};
-use rustc_errors::{Applicability, Diagnostic, MultiSpan};
+use rustc_errors::Diagnostic;
use rustc_hir::{self as hir, ExprKind};
+use rustc_hir_pretty::ty_to_string;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::traits::Obligation;
use rustc_middle::ty::{self, Ty};
@@ -225,24 +226,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
return;
}
- let semi_span = expr.span.shrink_to_hi().with_hi(semi_span.hi());
- let mut ret_span: MultiSpan = semi_span.into();
- ret_span.push_span_label(
- expr.span,
- "this could be implicitly returned but it is a statement, not a tail expression",
- );
- ret_span.push_span_label(ret, "the `match` arms can conform to this return type");
- ret_span.push_span_label(
- semi_span,
- "the `match` is a statement because of this semicolon, consider removing it",
- );
- diag.span_note(ret_span, "you might have meant to return the `match` expression");
- diag.tool_only_span_suggestion(
- semi_span,
- "remove this semicolon",
- "",
- Applicability::MaybeIncorrect,
- );
+ let semi = expr.span.shrink_to_hi().with_hi(semi_span.hi());
+ let sugg = crate::errors::RemoveSemiForCoerce { expr: expr.span, ret, semi };
+ diag.subdiagnostic(sugg);
}
/// When the previously checked expression (the scrutinee) diverges,
@@ -267,7 +253,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
// If this `if` expr is the parent's function return expr,
// the cause of the type coercion is the return type, point at it. (#25228)
- let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, span);
+ let hir_id = self.tcx.hir().parent_id(self.tcx.hir().parent_id(then_expr.hir_id));
+ let ret_reason = self.maybe_get_coercion_reason(hir_id, span);
let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse);
let mut error = false;
coercion.coerce_forced_unit(
@@ -290,11 +277,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
error
}
- fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> {
- let node = {
- let rslt = self.tcx.hir().parent_id(self.tcx.hir().parent_id(hir_id));
- self.tcx.hir().get(rslt)
- };
+ pub fn maybe_get_coercion_reason(
+ &self,
+ hir_id: hir::HirId,
+ sp: Span,
+ ) -> Option<(Span, String)> {
+ let node = self.tcx.hir().get(hir_id);
if let hir::Node::Block(block) = node {
// check that the body's parent is an fn
let parent = self.tcx.hir().get_parent(self.tcx.hir().parent_id(block.hir_id));
@@ -304,9 +292,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// check that the `if` expr without `else` is the fn body's expr
if expr.span == sp {
return self.get_fn_decl(hir_id).and_then(|(_, fn_decl, _)| {
- let span = fn_decl.output.span();
- let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok()?;
- Some((span, format!("expected `{snippet}` because of this return type")))
+ let (ty, span) = match fn_decl.output {
+ hir::FnRetTy::DefaultReturn(span) => ("()".to_string(), span),
+ hir::FnRetTy::Return(ty) => (ty_to_string(ty), ty.span),
+ };
+ Some((span, format!("expected `{ty}` because of this return type")))
});
}
}
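Editorial note, not part of the patch: a minimal sketch of the situation the new `RemoveSemiForCoerce` subdiagnostic describes. The trailing semicolon makes the `match` a statement, so its arms no longer coerce to the declared return type and the compiler suggests removing the semicolon.

// Rejected with E0308 (mismatched types); the structured subdiagnostic replaces the
// hand-built span notes that were deleted above.
fn parity(n: u32) -> &'static str {
    match n % 2 {
        0 => "even",
        _ => "odd",
    };
}

fn main() {
    println!("{}", parity(3));
}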
diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs
index 02371f85a..f2c58ee27 100644
--- a/compiler/rustc_hir_typeck/src/callee.rs
+++ b/compiler/rustc_hir_typeck/src/callee.rs
@@ -2,9 +2,9 @@ use super::method::probe::ProbeScope;
use super::method::MethodCallee;
use super::{Expectation, FnCtxt, TupleArgumentsFlag};
-use crate::type_error_struct;
+use crate::errors;
use rustc_ast::util::parser::PREC_POSTFIX;
-use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorGuaranteed, StashKey};
+use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, StashKey};
use rustc_hir as hir;
use rustc_hir::def::{self, CtorKind, DefKind, Namespace, Res};
use rustc_hir::def_id::DefId;
@@ -44,23 +44,15 @@ pub fn check_legal_trait_for_method_call(
trait_id: DefId,
) {
if tcx.lang_items().drop_trait() == Some(trait_id) {
- let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
- err.span_label(span, "explicit destructor calls not allowed");
-
- let (sp, suggestion) = receiver
- .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok())
- .filter(|snippet| !snippet.is_empty())
- .map(|snippet| (expr_span, format!("drop({snippet})")))
- .unwrap_or_else(|| (span, "drop".to_string()));
-
- err.span_suggestion(
- sp,
- "consider using `drop` function",
- suggestion,
- Applicability::MaybeIncorrect,
- );
-
- err.emit();
+ let sugg = if let Some(receiver) = receiver.filter(|s| !s.is_empty()) {
+ errors::ExplicitDestructorCallSugg::Snippet {
+ lo: expr_span.shrink_to_lo(),
+ hi: receiver.shrink_to_hi().to(expr_span.shrink_to_hi()),
+ }
+ } else {
+ errors::ExplicitDestructorCallSugg::Empty(span)
+ };
+ tcx.sess.emit_err(errors::ExplicitDestructorCall { span, sugg });
}
}
@@ -387,6 +379,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// Unit testing: function items annotated with
// `#[rustc_evaluate_where_clauses]` trigger special output
// to let us test the trait evaluation system.
+ // Untranslatable diagnostics are okay for rustc internals
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
let predicates = self.tcx.predicates_of(def_id);
let predicates = predicates.instantiate(self.tcx, args);
@@ -478,10 +473,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
);
self.require_type_is_sized(ty, sp, traits::RustCall);
} else {
- self.tcx.sess.span_err(
- sp,
- "functions with the \"rust-call\" ABI must take a single non-self tuple argument",
- );
+ self.tcx.sess.emit_err(errors::RustCallIncorrectArgs { span: sp });
}
}
@@ -610,17 +602,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
let callee_ty = self.resolve_vars_if_possible(callee_ty);
- let mut err = type_error_struct!(
- self.tcx.sess,
- callee_expr.span,
- callee_ty,
- E0618,
- "expected function, found {}",
- match &unit_variant {
+ let mut err = self.tcx.sess.create_err(errors::InvalidCallee {
+ span: callee_expr.span,
+ ty: match &unit_variant {
Some((_, kind, path)) => format!("{kind} `{path}`"),
None => format!("`{callee_ty}`"),
- }
- );
+ },
+ });
+ if callee_ty.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
self.identify_bad_closure_def_and_call(
&mut err,
@@ -797,7 +788,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let effect = match const_context {
_ if host_always_on => tcx.consts.true_,
- Some(hir::ConstContext::Static(_) | hir::ConstContext::Const) => tcx.consts.false_,
+ Some(hir::ConstContext::Static(_) | hir::ConstContext::Const { .. }) => {
+ tcx.consts.false_
+ }
Some(hir::ConstContext::ConstFn) => {
let args = ty::GenericArgs::identity_for_item(tcx, context);
args.host_effect_param().expect("ConstContext::Maybe must have host effect param")
@@ -891,15 +884,7 @@ impl<'a, 'tcx> DeferredCallResolution<'tcx> {
None => {
// This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once`
// lang items are not defined (issue #86238).
- let mut err = fcx.inh.tcx.sess.struct_span_err(
- self.call_expr.span,
- "failed to find an overloaded call trait for closure call",
- );
- err.help(
- "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \
- and have correctly defined `call`/`call_mut`/`call_once` methods",
- );
- err.emit();
+ fcx.inh.tcx.sess.emit_err(errors::MissingFnLangItems { span: self.call_expr.span });
}
}
}
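Editorial note, not part of the patch: a small sketch of the E0040 case that `check_legal_trait_for_method_call` now reports through `errors::ExplicitDestructorCall`. Calling `Drop::drop` as a method is rejected, and the structured suggestion proposes `drop(file)` instead.

struct File;

impl Drop for File {
    fn drop(&mut self) {}
}

// Rejected with E0040: explicit use of destructor method.
fn close(file: File) {
    file.drop();
}

fn main() {
    close(File);
}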
diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs
index 31a03fabe..2b1ac7f35 100644
--- a/compiler/rustc_hir_typeck/src/cast.rs
+++ b/compiler/rustc_hir_typeck/src/cast.rs
@@ -30,11 +30,10 @@
use super::FnCtxt;
+use crate::errors;
use crate::type_error_struct;
use hir::ExprKind;
-use rustc_errors::{
- struct_span_err, Applicability, DelayDm, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
-};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_macros::{TypeFoldable, TypeVisitable};
use rustc_middle::mir::Mutability;
@@ -130,7 +129,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
| ty::Float(_)
| ty::Array(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::RawPtr(_)
| ty::Ref(..)
| ty::FnDef(..)
@@ -321,28 +319,15 @@ impl<'a, 'tcx> CastCheck<'tcx> {
.emit();
}
CastError::CastToBool => {
- let mut err =
- struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`");
-
- if self.expr_ty.is_numeric() {
- match fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) {
- Ok(snippet) => {
- err.span_suggestion(
- self.span,
- "compare with zero instead",
- format!("{snippet} != 0"),
- Applicability::MachineApplicable,
- );
- }
- Err(_) => {
- err.span_help(self.span, "compare with zero instead");
- }
- }
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let help = if self.expr_ty.is_numeric() {
+ errors::CannotCastToBoolHelp::Numeric(
+ self.expr_span.shrink_to_hi().with_hi(self.span.hi()),
+ )
} else {
- err.span_label(self.span, "unsupported cast");
- }
-
- err.emit();
+ errors::CannotCastToBoolHelp::Unsupported(self.span)
+ };
+ fcx.tcx.sess.emit_err(errors::CannotCastToBool { span: self.span, expr_ty, help });
}
CastError::CastToChar => {
let mut err = type_error_struct!(
@@ -531,33 +516,20 @@ impl<'a, 'tcx> CastCheck<'tcx> {
.emit();
}
CastError::IntToFatCast(known_metadata) => {
- let mut err = struct_span_err!(
- fcx.tcx.sess,
- self.cast_span,
- E0606,
- "cannot cast `{}` to a pointer that {} wide",
- fcx.ty_to_string(self.expr_ty),
- if known_metadata.is_some() { "is" } else { "may be" }
- );
-
- err.span_label(
- self.cast_span,
- format!(
- "creating a `{}` requires both an address and {}",
- self.cast_ty,
- known_metadata.unwrap_or("type-specific metadata"),
- ),
- );
-
- if fcx.tcx.sess.is_nightly_build() {
- err.span_label(
- self.expr_span,
- "consider casting this expression to `*const ()`, \
- then using `core::ptr::from_raw_parts`",
- );
- }
-
- err.emit();
+ let expr_if_nightly = fcx.tcx.sess.is_nightly_build().then_some(self.expr_span);
+ let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ let expr_ty = fcx.ty_to_string(self.expr_ty);
+ let metadata = known_metadata.unwrap_or("type-specific metadata");
+ let known_wide = known_metadata.is_some();
+ let span = self.cast_span;
+ fcx.tcx.sess.emit_err(errors::IntToWide {
+ span,
+ metadata,
+ expr_ty,
+ cast_ty,
+ expr_if_nightly,
+ known_wide,
+ });
}
CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => {
let unknown_cast_to = match e {
@@ -565,27 +537,16 @@ impl<'a, 'tcx> CastCheck<'tcx> {
CastError::UnknownExprPtrKind => false,
_ => bug!(),
};
- let mut err = struct_span_err!(
- fcx.tcx.sess,
- if unknown_cast_to { self.cast_span } else { self.span },
- E0641,
- "cannot cast {} a pointer of an unknown kind",
- if unknown_cast_to { "to" } else { "from" }
- );
- if unknown_cast_to {
- err.span_label(self.cast_span, "needs more type information");
- err.note(
- "the type information given here is insufficient to check whether \
- the pointer cast is valid",
- );
+ let (span, sub) = if unknown_cast_to {
+ (self.cast_span, errors::CastUnknownPointerSub::To(self.cast_span))
} else {
- err.span_label(
- self.span,
- "the type information given here is insufficient to check whether \
- the pointer cast is valid",
- );
- }
- err.emit();
+ (self.cast_span, errors::CastUnknownPointerSub::From(self.span))
+ };
+ fcx.tcx.sess.emit_err(errors::CastUnknownPointer {
+ span,
+ to: unknown_cast_to,
+ sub,
+ });
}
CastError::ForeignNonExhaustiveAdt => {
make_invalid_casting_error(
@@ -669,31 +630,18 @@ impl<'a, 'tcx> CastCheck<'tcx> {
}
fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
- let t_cast = self.cast_ty;
- let t_expr = self.expr_ty;
- let (adjective, lint) = if t_cast.is_numeric() && t_expr.is_numeric() {
- ("numeric ", lint::builtin::TRIVIAL_NUMERIC_CASTS)
+ let (numeric, lint) = if self.cast_ty.is_numeric() && self.expr_ty.is_numeric() {
+ (true, lint::builtin::TRIVIAL_NUMERIC_CASTS)
} else {
- ("", lint::builtin::TRIVIAL_CASTS)
+ (false, lint::builtin::TRIVIAL_CASTS)
};
- fcx.tcx.struct_span_lint_hir(
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ fcx.tcx.emit_spanned_lint(
lint,
self.expr.hir_id,
self.span,
- DelayDm(|| {
- format!(
- "trivial {}cast: `{}` as `{}`",
- adjective,
- fcx.ty_to_string(t_expr),
- fcx.ty_to_string(t_cast)
- )
- }),
- |lint| {
- lint.help(
- "cast can be replaced by coercion; this might \
- require a temporary variable",
- )
- },
+ errors::TrivialCast { numeric, expr_ty, cast_ty },
);
}
@@ -776,6 +724,9 @@ impl<'a, 'tcx> CastCheck<'tcx> {
},
// array-ptr-cast
Ptr(mt) => {
+ if !fcx.type_is_sized_modulo_regions(fcx.param_env, mt.ty) {
+ return Err(CastError::IllegalCast);
+ }
self.check_ref_cast(fcx, TypeAndMut { mutbl, ty: inner_ty }, mt)
}
_ => Err(CastError::NonScalar),
@@ -786,7 +737,6 @@ impl<'a, 'tcx> CastCheck<'tcx> {
}
_ => return Err(CastError::NonScalar),
};
-
if let ty::Adt(adt_def, _) = *self.expr_ty.kind() {
if adt_def.did().krate != LOCAL_CRATE {
if adt_def.variants().iter().any(VariantDef::is_field_list_non_exhaustive) {
@@ -794,7 +744,6 @@ impl<'a, 'tcx> CastCheck<'tcx> {
}
}
}
-
match (t_from, t_cast) {
// These types have invariants! can't cast into them.
(_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar),
@@ -986,93 +935,67 @@ impl<'a, 'tcx> CastCheck<'tcx> {
if let ty::Adt(d, _) = self.expr_ty.kind()
&& d.has_dtor(fcx.tcx)
{
- fcx.tcx.struct_span_lint_hir(
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty);
+
+ fcx.tcx.emit_spanned_lint(
lint::builtin::CENUM_IMPL_DROP_CAST,
self.expr.hir_id,
self.span,
- DelayDm(|| format!(
- "cannot cast enum `{}` into integer `{}` because it implements `Drop`",
- self.expr_ty, self.cast_ty
- )),
- |lint| {
- lint
- },
+ errors::CastEnumDrop {
+ expr_ty,
+ cast_ty,
+ }
);
}
}
fn lossy_provenance_ptr2int_lint(&self, fcx: &FnCtxt<'a, 'tcx>, t_c: ty::cast::IntTy) {
- fcx.tcx.struct_span_lint_hir(
+ let expr_prec = self.expr.precedence().order();
+ let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX;
+
+ let needs_cast = !matches!(t_c, ty::cast::IntTy::U(ty::UintTy::Usize));
+ let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span);
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ let expr_span = self.expr_span.shrink_to_lo();
+ let sugg = match (needs_parens, needs_cast) {
+ (true, true) => errors::LossyProvenancePtr2IntSuggestion::NeedsParensCast {
+ expr_span,
+ cast_span,
+ cast_ty,
+ },
+ (true, false) => {
+ errors::LossyProvenancePtr2IntSuggestion::NeedsParens { expr_span, cast_span }
+ }
+ (false, true) => {
+ errors::LossyProvenancePtr2IntSuggestion::NeedsCast { cast_span, cast_ty }
+ }
+ (false, false) => errors::LossyProvenancePtr2IntSuggestion::Other { cast_span },
+ };
+
+ let lint = errors::LossyProvenancePtr2Int { expr_ty, cast_ty, sugg };
+ fcx.tcx.emit_spanned_lint(
lint::builtin::LOSSY_PROVENANCE_CASTS,
self.expr.hir_id,
self.span,
- DelayDm(|| format!(
- "under strict provenance it is considered bad style to cast pointer `{}` to integer `{}`",
- self.expr_ty, self.cast_ty
- )),
- |lint| {
- let msg = "use `.addr()` to obtain the address of a pointer";
-
- let expr_prec = self.expr.precedence().order();
- let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX;
-
- let scalar_cast = match t_c {
- ty::cast::IntTy::U(ty::UintTy::Usize) => String::new(),
- _ => format!(" as {}", self.cast_ty),
- };
-
- let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span);
-
- if needs_parens {
- let suggestions = vec![
- (self.expr_span.shrink_to_lo(), String::from("(")),
- (cast_span, format!(").addr(){scalar_cast}")),
- ];
-
- lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
- } else {
- lint.span_suggestion(
- cast_span,
- msg,
- format!(".addr(){scalar_cast}"),
- Applicability::MaybeIncorrect,
- );
- }
-
- lint.help(
- "if you can't comply with strict provenance and need to expose the pointer \
- provenance you can use `.expose_addr()` instead"
- );
-
- lint
- },
+ lint,
);
}
fn fuzzy_provenance_int2ptr_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
- fcx.tcx.struct_span_lint_hir(
+ let sugg = errors::LossyProvenanceInt2PtrSuggestion {
+ lo: self.expr_span.shrink_to_lo(),
+ hi: self.expr_span.shrink_to_hi().to(self.cast_span),
+ };
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ let lint = errors::LossyProvenanceInt2Ptr { expr_ty, cast_ty, sugg };
+ fcx.tcx.emit_spanned_lint(
lint::builtin::FUZZY_PROVENANCE_CASTS,
self.expr.hir_id,
self.span,
- DelayDm(|| format!(
- "strict provenance disallows casting integer `{}` to pointer `{}`",
- self.expr_ty, self.cast_ty
- )),
- |lint| {
- let msg = "use `.with_addr()` to adjust a valid pointer in the same allocation, to this address";
- let suggestions = vec![
- (self.expr_span.shrink_to_lo(), String::from("(...).with_addr(")),
- (self.expr_span.shrink_to_hi().to(self.cast_span), String::from(")")),
- ];
-
- lint.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
- lint.help(
- "if you can't comply with strict provenance and don't have a pointer with \
- the correct provenance you can use `std::ptr::from_exposed_addr()` instead"
- );
-
- lint
- },
+ lint,
);
}
@@ -1088,26 +1011,19 @@ impl<'a, 'tcx> CastCheck<'tcx> {
if let Some((deref_ty, _)) = derefed {
// Give a note about what the expr derefs to.
if deref_ty != self.expr_ty.peel_refs() {
- err.span_note(
- self.expr_span,
- format!(
- "this expression `Deref`s to `{}` which implements `is_empty`",
- fcx.ty_to_string(deref_ty)
- ),
- );
+ err.subdiagnostic(errors::DerefImplsIsEmpty {
+ span: self.expr_span,
+ deref_ty: fcx.ty_to_string(deref_ty),
+ });
}
// Create a multipart suggestion: add `!` and `.is_empty()` in
// place of the cast.
- let suggestion = vec![
- (self.expr_span.shrink_to_lo(), "!".to_string()),
- (self.span.with_lo(self.expr_span.hi()), ".is_empty()".to_string()),
- ];
-
- err.multipart_suggestion_verbose(format!(
- "consider using the `is_empty` method on `{}` to determine if it contains anything",
- fcx.ty_to_string(self.expr_ty),
- ), suggestion, Applicability::MaybeIncorrect);
+ err.subdiagnostic(errors::UseIsEmpty {
+ lo: self.expr_span.shrink_to_lo(),
+ hi: self.span.with_lo(self.expr_span.hi()),
+ expr_ty: fcx.ty_to_string(self.expr_ty),
+ });
}
}
}
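Editorial note, not part of the patch: a minimal sketch of one of the cast errors ported above. Casting a numeric value to `bool` is E0054, and the new `CannotCastToBool` struct carries the "compare with zero instead" suggestion as structured help.

// Rejected with E0054; for this expression the suggested replacement amounts to
// the comparison `flags != 0`.
fn is_set(flags: u8) -> bool {
    flags as bool
}

fn main() {
    assert!(is_set(1));
}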
diff --git a/compiler/rustc_hir_typeck/src/check.rs b/compiler/rustc_hir_typeck/src/check.rs
index 1fc1e5aca..1fa0ec173 100644
--- a/compiler/rustc_hir_typeck/src/check.rs
+++ b/compiler/rustc_hir_typeck/src/check.rs
@@ -1,7 +1,6 @@
+use std::cell::RefCell;
+
use crate::coercion::CoerceMany;
-use crate::errors::{
- LangStartIncorrectNumberArgs, LangStartIncorrectParam, LangStartIncorrectRetTy,
-};
use crate::gather_locals::GatherLocalsVisitor;
use crate::FnCtxt;
use crate::GeneratorTypes;
@@ -9,14 +8,15 @@ use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
-use rustc_hir_analysis::check::fn_maybe_err;
+use rustc_hir_analysis::check::{check_function_signature, fn_maybe_err};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::RegionVariableOrigin;
use rustc_middle::ty::{self, Binder, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::sym;
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits;
-use std::cell::RefCell;
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
/// Helper used for fns and closures. Does the grungy work of checking a function
/// body and returns the function context used for that purpose, since in the case of a fn item
@@ -166,52 +166,17 @@ pub(super) fn check_fn<'a, 'tcx>(
if let Some(panic_impl_did) = tcx.lang_items().panic_impl()
&& panic_impl_did == fn_def_id.to_def_id()
{
- check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig);
}
if let Some(lang_start_defid) = tcx.lang_items().start_fn() && lang_start_defid == fn_def_id.to_def_id() {
- check_lang_start_fn(tcx, fn_sig, decl, fn_def_id);
+ check_lang_start_fn(tcx, fn_sig, fn_def_id);
}
gen_ty
}
-fn check_panic_info_fn(
- tcx: TyCtxt<'_>,
- fn_id: LocalDefId,
- fn_sig: ty::FnSig<'_>,
- decl: &hir::FnDecl<'_>,
- declared_ret_ty: Ty<'_>,
-) {
- let Some(panic_info_did) = tcx.lang_items().panic_info() else {
- tcx.sess.err("language item required, but not found: `panic_info`");
- return;
- };
-
- if *declared_ret_ty.kind() != ty::Never {
- tcx.sess.span_err(decl.output.span(), "return type should be `!`");
- }
-
- let inputs = fn_sig.inputs();
- if inputs.len() != 1 {
- tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
- return;
- }
-
- let arg_is_panic_info = match *inputs[0].kind() {
- ty::Ref(region, ty, mutbl) => match *ty.kind() {
- ty::Adt(ref adt, _) => {
- adt.did() == panic_info_did && mutbl.is_not() && !region.is_static()
- }
- _ => false,
- },
- _ => false,
- };
-
- if !arg_is_panic_info {
- tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`");
- }
-
+fn check_panic_info_fn(tcx: TyCtxt<'_>, fn_id: LocalDefId, fn_sig: ty::FnSig<'_>) {
let DefKind::Fn = tcx.def_kind(fn_id) else {
let span = tcx.def_span(fn_id);
tcx.sess.span_err(span, "should be a function");
@@ -227,125 +192,87 @@ fn check_panic_info_fn(
let span = tcx.def_span(fn_id);
tcx.sess.span_err(span, "should have no const parameters");
}
-}
-
-fn check_lang_start_fn<'tcx>(
- tcx: TyCtxt<'tcx>,
- fn_sig: ty::FnSig<'tcx>,
- decl: &'tcx hir::FnDecl<'tcx>,
- def_id: LocalDefId,
-) {
- let inputs = fn_sig.inputs();
-
- let arg_count = inputs.len();
- if arg_count != 4 {
- tcx.sess.emit_err(LangStartIncorrectNumberArgs {
- params_span: tcx.def_span(def_id),
- found_param_count: arg_count,
- });
- }
- // only check args if they should exist by checking the count
- // note: this does not handle args being shifted or their order swapped very nicely
- // but it's a lang item, users shouldn't frequently encounter this
-
- // first arg is `main: fn() -> T`
- if let Some(&main_arg) = inputs.get(0) {
- // make a Ty for the generic on the fn for diagnostics
- // FIXME: make the lang item generic checks check for the right generic *kind*
- // for example `start`'s generic should be a type parameter
- let generics = tcx.generics_of(def_id);
- let fn_generic = generics.param_at(0, tcx);
- let generic_ty = Ty::new_param(tcx, fn_generic.index, fn_generic.name);
- let expected_fn_sig =
- tcx.mk_fn_sig([], generic_ty, false, hir::Unsafety::Normal, Abi::Rust);
- let expected_ty = Ty::new_fn_ptr(tcx, Binder::dummy(expected_fn_sig));
-
- // we emit the same error to suggest changing the arg no matter what's wrong with the arg
- let emit_main_fn_arg_err = || {
- tcx.sess.emit_err(LangStartIncorrectParam {
- param_span: decl.inputs[0].span,
- param_num: 1,
- expected_ty: expected_ty,
- found_ty: main_arg,
- });
- };
-
- if let ty::FnPtr(main_fn_sig) = main_arg.kind() {
- let main_fn_inputs = main_fn_sig.inputs();
- if main_fn_inputs.iter().count() != 0 {
- emit_main_fn_arg_err();
- }
-
- let output = main_fn_sig.output();
- output.map_bound(|ret_ty| {
- // if the output ty is a generic, it's probably the right one
- if !matches!(ret_ty.kind(), ty::Param(_)) {
- emit_main_fn_arg_err();
- }
- });
- } else {
- emit_main_fn_arg_err();
- }
- }
-
- // second arg is isize
- if let Some(&argc_arg) = inputs.get(1) {
- if argc_arg != tcx.types.isize {
- tcx.sess.emit_err(LangStartIncorrectParam {
- param_span: decl.inputs[1].span,
- param_num: 2,
- expected_ty: tcx.types.isize,
- found_ty: argc_arg,
- });
- }
- }
-
- // third arg is `*const *const u8`
- if let Some(&argv_arg) = inputs.get(2) {
- let mut argv_is_okay = false;
- if let ty::RawPtr(outer_ptr) = argv_arg.kind() {
- if outer_ptr.mutbl.is_not() {
- if let ty::RawPtr(inner_ptr) = outer_ptr.ty.kind() {
- if inner_ptr.mutbl.is_not() && inner_ptr.ty == tcx.types.u8 {
- argv_is_okay = true;
- }
- }
- }
- }
-
- if !argv_is_okay {
- let inner_ptr_ty =
- Ty::new_ptr(tcx, ty::TypeAndMut { mutbl: hir::Mutability::Not, ty: tcx.types.u8 });
- let expected_ty =
- Ty::new_ptr(tcx, ty::TypeAndMut { mutbl: hir::Mutability::Not, ty: inner_ptr_ty });
- tcx.sess.emit_err(LangStartIncorrectParam {
- param_span: decl.inputs[2].span,
- param_num: 3,
- expected_ty,
- found_ty: argv_arg,
- });
- }
- }
+ let Some(panic_info_did) = tcx.lang_items().panic_info() else {
+ tcx.sess.err("language item required, but not found: `panic_info`");
+ return;
+ };
- // fourth arg is `sigpipe: u8`
- if let Some(&sigpipe_arg) = inputs.get(3) {
- if sigpipe_arg != tcx.types.u8 {
- tcx.sess.emit_err(LangStartIncorrectParam {
- param_span: decl.inputs[3].span,
- param_num: 4,
- expected_ty: tcx.types.u8,
- found_ty: sigpipe_arg,
- });
- }
- }
+ // build type `for<'a, 'b> fn(&'a PanicInfo<'b>) -> !`
+ let panic_info_ty = tcx.type_of(panic_info_did).instantiate(
+ tcx,
+ &[ty::GenericArg::from(ty::Region::new_late_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrAnon },
+ ))],
+ );
+ let panic_info_ref_ty = Ty::new_imm_ref(
+ tcx,
+ ty::Region::new_late_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon },
+ ),
+ panic_info_ty,
+ );
+
+ let bounds = tcx.mk_bound_variable_kinds(&[
+ ty::BoundVariableKind::Region(ty::BrAnon),
+ ty::BoundVariableKind::Region(ty::BrAnon),
+ ]);
+ let expected_sig = ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig([panic_info_ref_ty], tcx.types.never, false, fn_sig.unsafety, Abi::Rust),
+ bounds,
+ );
+
+ check_function_signature(
+ tcx,
+ ObligationCause::new(
+ tcx.def_span(fn_id),
+ fn_id,
+ ObligationCauseCode::LangFunctionType(sym::panic_impl),
+ ),
+ fn_id.into(),
+ expected_sig,
+ );
+}
- // output type is isize
- if fn_sig.output() != tcx.types.isize {
- tcx.sess.emit_err(LangStartIncorrectRetTy {
- ret_span: decl.output.span(),
- expected_ty: tcx.types.isize,
- found_ty: fn_sig.output(),
- });
- }
+fn check_lang_start_fn<'tcx>(tcx: TyCtxt<'tcx>, fn_sig: ty::FnSig<'tcx>, def_id: LocalDefId) {
+ // build type `fn(main: fn() -> T, argc: isize, argv: *const *const u8, sigpipe: u8)`
+
+ // make a Ty for the generic on the fn for diagnostics
+ // FIXME: make the lang item generic checks check for the right generic *kind*
+ // for example `start`'s generic should be a type parameter
+ let generics = tcx.generics_of(def_id);
+ let fn_generic = generics.param_at(0, tcx);
+ let generic_ty = Ty::new_param(tcx, fn_generic.index, fn_generic.name);
+ let main_fn_ty = Ty::new_fn_ptr(
+ tcx,
+ Binder::dummy(tcx.mk_fn_sig([], generic_ty, false, hir::Unsafety::Normal, Abi::Rust)),
+ );
+
+ let expected_sig = ty::Binder::dummy(tcx.mk_fn_sig(
+ [
+ main_fn_ty,
+ tcx.types.isize,
+ Ty::new_imm_ptr(tcx, Ty::new_imm_ptr(tcx, tcx.types.u8)),
+ tcx.types.u8,
+ ],
+ tcx.types.isize,
+ false,
+ fn_sig.unsafety,
+ Abi::Rust,
+ ));
+
+ check_function_signature(
+ tcx,
+ ObligationCause::new(
+ tcx.def_span(def_id),
+ def_id,
+ ObligationCauseCode::LangFunctionType(sym::start),
+ ),
+ def_id.into(),
+ expected_sig,
+ );
}
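Both lang-item checks now build the expected `ty::FnSig` up front and delegate to `check_function_signature`, rather than inspecting parameters one by one. For `#[panic_handler]` the expected shape is the `for<'a, 'b> fn(&'a PanicInfo<'b>) -> !` noted in the comment above; a minimal user-level sketch of a conforming handler (a `#![no_std]` library crate; nothing here is rustc-internal API):

```rust
#![no_std]

use core::panic::PanicInfo;

// The shape the rewritten check enforces: exactly one `&PanicInfo` argument
// and the never type `!` as the return type.
#[panic_handler]
fn on_panic(_info: &PanicInfo) -> ! {
    loop {}
}
```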
diff --git a/compiler/rustc_hir_typeck/src/closure.rs b/compiler/rustc_hir_typeck/src/closure.rs
index b19fb6da6..e426b9375 100644
--- a/compiler/rustc_hir_typeck/src/closure.rs
+++ b/compiler/rustc_hir_typeck/src/closure.rs
@@ -231,7 +231,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let inferred_sig = self.normalize(
span,
self.deduce_sig_from_projection(
- Some(span),
+ Some(span),
bound_predicate.rebind(proj_predicate),
),
);
diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
index 2c16f21b4..d97691369 100644
--- a/compiler/rustc_hir_typeck/src/demand.rs
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -83,6 +83,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
self.annotate_expected_due_to_let_ty(err, expr, error);
+ self.annotate_loop_expected_due_to_inference(err, expr, error);
// FIXME(#73154): For now, we do leak check when coercing function
// pointers in typeck, instead of only during borrowck. This can lead
@@ -527,6 +528,136 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
false
}
+ // When encountering a type error on the value of a `break`, try to point at the reason for the
+ // expected type.
+ pub fn annotate_loop_expected_due_to_inference(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ error: Option<TypeError<'tcx>>,
+ ) {
+ let Some(TypeError::Sorts(ExpectedFound { expected, .. })) = error else {
+ return;
+ };
+ let mut parent_id = self.tcx.hir().parent_id(expr.hir_id);
+ let mut parent;
+ 'outer: loop {
+ // Climb the HIR tree to see if the current `Expr` is part of a `break;` statement.
+ let Some(
+ hir::Node::Stmt(hir::Stmt { kind: hir::StmtKind::Semi(&ref p), .. })
+ | hir::Node::Block(hir::Block { expr: Some(&ref p), .. })
+ | hir::Node::Expr(&ref p),
+ ) = self.tcx.hir().find(parent_id)
+ else {
+ break;
+ };
+ parent = p;
+ parent_id = self.tcx.hir().parent_id(parent_id);
+ let hir::ExprKind::Break(destination, _) = parent.kind else {
+ continue;
+ };
+ let mut parent_id = parent_id;
+ let mut direct = false;
+ loop {
+ // Climb the HIR tree to find the (desugared) `loop` this `break` corresponds to.
+ let parent = match self.tcx.hir().find(parent_id) {
+ Some(hir::Node::Expr(&ref parent)) => {
+ parent_id = self.tcx.hir().parent_id(parent.hir_id);
+ parent
+ }
+ Some(hir::Node::Stmt(hir::Stmt {
+ hir_id,
+ kind: hir::StmtKind::Semi(&ref parent) | hir::StmtKind::Expr(&ref parent),
+ ..
+ })) => {
+ parent_id = self.tcx.hir().parent_id(*hir_id);
+ parent
+ }
+ Some(hir::Node::Block(_)) => {
+ parent_id = self.tcx.hir().parent_id(parent_id);
+ parent
+ }
+ _ => break,
+ };
+ if let hir::ExprKind::Loop(..) = parent.kind {
+ // When you have `'a: loop { break; }`, the `break` corresponds to the labeled
+ // loop, so we need to account for that.
+ direct = !direct;
+ }
+ if let hir::ExprKind::Loop(block, label, _, span) = parent.kind
+ && (destination.label == label || direct)
+ {
+ if let Some((reason_span, message)) =
+ self.maybe_get_coercion_reason(parent_id, parent.span)
+ {
+ err.span_label(reason_span, message);
+ err.span_label(
+ span,
+ format!("this loop is expected to be of type `{expected}`"),
+ );
+ break 'outer;
+ } else {
+ // Locate all other `break` statements within the same `loop` that might
+ // have affected inference.
+ struct FindBreaks<'tcx> {
+ label: Option<rustc_ast::Label>,
+ uses: Vec<&'tcx hir::Expr<'tcx>>,
+ nest_depth: usize,
+ }
+ impl<'tcx> Visitor<'tcx> for FindBreaks<'tcx> {
+ fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) {
+ let nest_depth = self.nest_depth;
+ if let hir::ExprKind::Loop(_, label, _, _) = ex.kind {
+ if label == self.label {
+ // Account for `'a: loop { 'a: loop {...} }`.
+ return;
+ }
+ self.nest_depth += 1;
+ }
+ if let hir::ExprKind::Break(destination, _) = ex.kind
+ && (self.label == destination.label
+ // Account for `loop { 'a: loop { loop { break; } } }`.
+ || destination.label.is_none() && self.nest_depth == 0)
+ {
+ self.uses.push(ex);
+ }
+ hir::intravisit::walk_expr(self, ex);
+ self.nest_depth = nest_depth;
+ }
+ }
+ let mut expr_finder = FindBreaks { label, uses: vec![], nest_depth: 0 };
+ expr_finder.visit_block(block);
+ let mut exit = false;
+ for ex in expr_finder.uses {
+ let hir::ExprKind::Break(_, val) = ex.kind else {
+ continue;
+ };
+ let ty = match val {
+ Some(val) => {
+ match self.typeck_results.borrow().expr_ty_adjusted_opt(val) {
+ None => continue,
+ Some(ty) => ty,
+ }
+ }
+ None => self.tcx.types.unit,
+ };
+ if self.can_eq(self.param_env, ty, expected) {
+ err.span_label(
+ ex.span,
+ "expected because of this `break`",
+ );
+ exit = true;
+ }
+ }
+ if exit {
+ break 'outer;
+ }
+ }
+ }
+ }
+ }
+ }
+
fn annotate_expected_due_to_let_ty(
&self,
err: &mut Diagnostic,
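The new `annotate_loop_expected_due_to_inference` climbs from a mismatched `break` value to its enclosing (possibly labeled) `loop`, then labels the other `break`s that fixed the loop's type as the source of the expectation. A hedged sketch of the code shape this targets, kept compiling, with the mismatching `break` only described in a comment:

```rust
fn main() {
    let mut i = 0;
    let n = loop {
        i += 1;
        if i > 3 {
            break i; // this `break` is what fixes the loop's type to `i32` ...
        }
        if i == 0 {
            break 0; // ... so a later `break "done"` here would be a type mismatch,
                     // and the new labels point back at the earlier `break`.
        }
    };
    assert_eq!(n, 4);
}
```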
diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs
index 054d23c71..1526988fb 100644
--- a/compiler/rustc_hir_typeck/src/errors.rs
+++ b/compiler/rustc_hir_typeck/src/errors.rs
@@ -6,7 +6,7 @@ use rustc_errors::{
AddToDiagnostic, Applicability, Diagnostic, DiagnosticArgValue, IntoDiagnosticArg, MultiSpan,
SubdiagnosticMessage,
};
-use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_middle::ty::Ty;
use rustc_span::{
edition::{Edition, LATEST_STABLE_EDITION},
@@ -55,6 +55,13 @@ impl IntoDiagnosticArg for ReturnLikeStatementKind {
}
#[derive(Diagnostic)]
+#[diag(hir_typeck_rustcall_incorrect_args)]
+pub struct RustCallIncorrectArgs {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(hir_typeck_yield_expr_outside_of_generator, code = "E0627")]
pub struct YieldExprOutsideOfGenerator {
#[primary_span]
@@ -77,6 +84,14 @@ pub struct MethodCallOnUnknownRawPointee {
}
#[derive(Diagnostic)]
+#[diag(hir_typeck_missing_fn_lang_items)]
+#[help]
+pub struct MissingFnLangItems {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(hir_typeck_functional_record_update_on_non_struct, code = "E0436")]
pub struct FunctionalRecordUpdateOnNonStruct {
#[primary_span]
@@ -130,6 +145,29 @@ pub enum ExpectedReturnTypeLabel<'tcx> {
}
#[derive(Diagnostic)]
+#[diag(hir_typeck_explicit_destructor, code = "E0040")]
+pub struct ExplicitDestructorCall {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sugg: ExplicitDestructorCallSugg,
+}
+
+#[derive(Subdiagnostic)]
+pub enum ExplicitDestructorCallSugg {
+ #[suggestion(hir_typeck_suggestion, code = "drop", applicability = "maybe-incorrect")]
+ Empty(#[primary_span] Span),
+ #[multipart_suggestion(hir_typeck_suggestion, style = "short")]
+ Snippet {
+ #[suggestion_part(code = "drop(")]
+ lo: Span,
+ #[suggestion_part(code = ")")]
+ hi: Span,
+ },
+}
+
+#[derive(Diagnostic)]
#[diag(hir_typeck_missing_parentheses_in_range, code = "E0689")]
pub struct MissingParenthesesInRange {
#[primary_span]
@@ -198,37 +236,67 @@ impl AddToDiagnostic for TypeMismatchFruTypo {
}
}
-#[derive(Diagnostic)]
-#[diag(hir_typeck_lang_start_incorrect_number_params)]
-#[note(hir_typeck_lang_start_incorrect_number_params_note_expected_count)]
-#[note(hir_typeck_lang_start_expected_sig_note)]
-pub struct LangStartIncorrectNumberArgs {
- #[primary_span]
- pub params_span: Span,
- pub found_param_count: usize,
+#[derive(LintDiagnostic)]
+#[diag(hir_typeck_lossy_provenance_int2ptr)]
+#[help]
+pub struct LossyProvenanceInt2Ptr<'tcx> {
+ pub expr_ty: Ty<'tcx>,
+ pub cast_ty: Ty<'tcx>,
+ #[subdiagnostic]
+ pub sugg: LossyProvenanceInt2PtrSuggestion,
}
-#[derive(Diagnostic)]
-#[diag(hir_typeck_lang_start_incorrect_param)]
-pub struct LangStartIncorrectParam<'tcx> {
- #[primary_span]
- #[suggestion(style = "short", code = "{expected_ty}", applicability = "machine-applicable")]
- pub param_span: Span,
-
- pub param_num: usize,
- pub expected_ty: Ty<'tcx>,
- pub found_ty: Ty<'tcx>,
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(hir_typeck_suggestion, applicability = "has-placeholders")]
+pub struct LossyProvenanceInt2PtrSuggestion {
+ #[suggestion_part(code = "(...).with_addr(")]
+ pub lo: Span,
+ #[suggestion_part(code = ")")]
+ pub hi: Span,
}
-#[derive(Diagnostic)]
-#[diag(hir_typeck_lang_start_incorrect_ret_ty)]
-pub struct LangStartIncorrectRetTy<'tcx> {
- #[primary_span]
- #[suggestion(style = "short", code = "{expected_ty}", applicability = "machine-applicable")]
- pub ret_span: Span,
+#[derive(LintDiagnostic)]
+#[diag(hir_typeck_lossy_provenance_ptr2int)]
+#[help]
+pub struct LossyProvenancePtr2Int<'tcx> {
+ pub expr_ty: Ty<'tcx>,
+ pub cast_ty: Ty<'tcx>,
+ #[subdiagnostic]
+ pub sugg: LossyProvenancePtr2IntSuggestion<'tcx>,
+}
- pub expected_ty: Ty<'tcx>,
- pub found_ty: Ty<'tcx>,
+#[derive(Subdiagnostic)]
+pub enum LossyProvenancePtr2IntSuggestion<'tcx> {
+ #[multipart_suggestion(hir_typeck_suggestion, applicability = "maybe-incorrect")]
+ NeedsParensCast {
+ #[suggestion_part(code = "(")]
+ expr_span: Span,
+ #[suggestion_part(code = ").addr() as {cast_ty}")]
+ cast_span: Span,
+ cast_ty: Ty<'tcx>,
+ },
+ #[multipart_suggestion(hir_typeck_suggestion, applicability = "maybe-incorrect")]
+ NeedsParens {
+ #[suggestion_part(code = "(")]
+ expr_span: Span,
+ #[suggestion_part(code = ").addr()")]
+ cast_span: Span,
+ },
+ #[suggestion(
+ hir_typeck_suggestion,
+ code = ".addr() as {cast_ty}",
+ applicability = "maybe-incorrect"
+ )]
+ NeedsCast {
+ #[primary_span]
+ cast_span: Span,
+ cast_ty: Ty<'tcx>,
+ },
+ #[suggestion(hir_typeck_suggestion, code = ".addr()", applicability = "maybe-incorrect")]
+ Other {
+ #[primary_span]
+ cast_span: Span,
+ },
}
#[derive(Subdiagnostic)]
@@ -252,6 +320,28 @@ impl HelpUseLatestEdition {
}
}
+#[derive(Diagnostic)]
+#[diag(hir_typeck_invalid_callee, code = "E0618")]
+pub struct InvalidCallee {
+ #[primary_span]
+ pub span: Span,
+ pub ty: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_typeck_int_to_fat, code = "E0606")]
+pub struct IntToWide<'tcx> {
+ #[primary_span]
+ #[label(hir_typeck_int_to_fat_label)]
+ pub span: Span,
+ pub metadata: &'tcx str,
+ pub expr_ty: String,
+ pub cast_ty: Ty<'tcx>,
+ #[label(hir_typeck_int_to_fat_label_nightly)]
+ pub expr_if_nightly: Option<Span>,
+ pub known_wide: bool,
+}
+
#[derive(Subdiagnostic)]
pub enum OptionResultRefMismatch {
#[suggestion(
@@ -292,6 +382,32 @@ pub enum OptionResultRefMismatch {
// },
}
+pub struct RemoveSemiForCoerce {
+ pub expr: Span,
+ pub ret: Span,
+ pub semi: Span,
+}
+
+impl AddToDiagnostic for RemoveSemiForCoerce {
+ fn add_to_diagnostic_with<F>(self, diag: &mut Diagnostic, _: F)
+ where
+ F: Fn(&mut Diagnostic, SubdiagnosticMessage) -> SubdiagnosticMessage,
+ {
+ let mut multispan: MultiSpan = self.semi.into();
+ multispan.push_span_label(self.expr, fluent::hir_typeck_remove_semi_for_coerce_expr);
+ multispan.push_span_label(self.ret, fluent::hir_typeck_remove_semi_for_coerce_ret);
+ multispan.push_span_label(self.semi, fluent::hir_typeck_remove_semi_for_coerce_semi);
+ diag.span_note(multispan, fluent::hir_typeck_remove_semi_for_coerce);
+
+ diag.tool_only_span_suggestion(
+ self.semi,
+ fluent::hir_typeck_remove_semi_for_coerce_suggestion,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+}
+
#[derive(Diagnostic)]
#[diag(hir_typeck_const_select_must_be_const)]
#[help]
@@ -324,6 +440,20 @@ pub struct UnionPatDotDot {
pub span: Span,
}
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(
+ hir_typeck_use_is_empty,
+ applicability = "maybe-incorrect",
+ style = "verbose"
+)]
+pub struct UseIsEmpty {
+ #[suggestion_part(code = "!")]
+ pub lo: Span,
+ #[suggestion_part(code = ".is_empty()")]
+ pub hi: Span,
+ pub expr_ty: String,
+}
+
#[derive(Diagnostic)]
#[diag(hir_typeck_arg_mismatch_indeterminate)]
pub struct ArgMismatchIndeterminate {
@@ -370,6 +500,15 @@ pub struct SuggestPtrNullMut {
pub span: Span,
}
+#[derive(LintDiagnostic)]
+#[diag(hir_typeck_trivial_cast)]
+#[help]
+pub struct TrivialCast<'tcx> {
+ pub numeric: bool,
+ pub expr_ty: Ty<'tcx>,
+ pub cast_ty: Ty<'tcx>,
+}
+
#[derive(Diagnostic)]
#[diag(hir_typeck_no_associated_item, code = "E0599")]
pub struct NoAssociatedItem {
@@ -393,6 +532,74 @@ pub struct CandidateTraitNote {
}
#[derive(Diagnostic)]
+#[diag(hir_typeck_cannot_cast_to_bool, code = "E0054")]
+pub struct CannotCastToBool<'tcx> {
+ #[primary_span]
+ pub span: Span,
+ pub expr_ty: Ty<'tcx>,
+ #[subdiagnostic]
+ pub help: CannotCastToBoolHelp,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(hir_typeck_cast_enum_drop)]
+pub struct CastEnumDrop<'tcx> {
+ pub expr_ty: Ty<'tcx>,
+ pub cast_ty: Ty<'tcx>,
+}
+
+#[derive(Diagnostic)]
+#[diag(hir_typeck_cast_unknown_pointer, code = "E0641")]
+pub struct CastUnknownPointer {
+ #[primary_span]
+ pub span: Span,
+ pub to: bool,
+ #[subdiagnostic]
+ pub sub: CastUnknownPointerSub,
+}
+
+pub enum CastUnknownPointerSub {
+ To(Span),
+ From(Span),
+}
+
+impl rustc_errors::AddToDiagnostic for CastUnknownPointerSub {
+ fn add_to_diagnostic_with<F>(self, diag: &mut rustc_errors::Diagnostic, f: F)
+ where
+ F: Fn(
+ &mut Diagnostic,
+ rustc_errors::SubdiagnosticMessage,
+ ) -> rustc_errors::SubdiagnosticMessage,
+ {
+ match self {
+ CastUnknownPointerSub::To(span) => {
+ let msg = f(diag, crate::fluent_generated::hir_typeck_label_to.into());
+ diag.span_label(span, msg);
+ let msg = f(diag, crate::fluent_generated::hir_typeck_note.into());
+ diag.note(msg);
+ }
+ CastUnknownPointerSub::From(span) => {
+ let msg = f(diag, crate::fluent_generated::hir_typeck_label_from.into());
+ diag.span_label(span, msg);
+ }
+ }
+ }
+}
+
+#[derive(Subdiagnostic)]
+pub enum CannotCastToBoolHelp {
+ #[suggestion(
+ hir_typeck_suggestion,
+ applicability = "machine-applicable",
+ code = " != 0",
+ style = "verbose"
+ )]
+ Numeric(#[primary_span] Span),
+ #[label(hir_typeck_label)]
+ Unsupported(#[primary_span] Span),
+}
+
+#[derive(Diagnostic)]
#[diag(hir_typeck_ctor_is_private, code = "E0603")]
pub struct CtorIsPrivate {
#[primary_span]
@@ -401,6 +608,14 @@ pub struct CtorIsPrivate {
}
#[derive(Subdiagnostic)]
+#[note(hir_typeck_deref_is_empty)]
+pub struct DerefImplsIsEmpty {
+ #[primary_span]
+ pub span: Span,
+ pub deref_ty: String,
+}
+
+#[derive(Subdiagnostic)]
#[multipart_suggestion(
hir_typeck_convert_using_method,
applicability = "machine-applicable",
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index 7cea40fdd..eead4da5e 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -43,7 +43,10 @@ use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::ObligationCause;
use rustc_middle::middle::stability;
use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
-use rustc_middle::ty::error::TypeError::FieldMisMatch;
+use rustc_middle::ty::error::{
+ ExpectedFound,
+ TypeError::{FieldMisMatch, Sorts},
+};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, AdtKind, Ty, TypeVisitableExt};
use rustc_session::errors::ExprParenthesesNeeded;
@@ -525,8 +528,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
_ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0,
};
- if let ty::FnDef(did, ..) = *ty.kind() {
+ if let ty::FnDef(did, callee_args) = *ty.kind() {
let fn_sig = ty.fn_sig(tcx);
+
+ // HACK: whenever we get a FnDef in a non-const context, enforce effects to get the
+ // default `host = true` to avoid inference errors later.
+ if tcx.hir().body_const_context(self.body_id).is_none() {
+ self.enforce_context_effects(expr.hir_id, qpath.span(), did, callee_args);
+ }
if tcx.fn_sig(did).skip_binder().abi() == RustIntrinsic
&& tcx.item_name(did) == sym::transmute
{
@@ -658,15 +667,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.suggest_mismatched_types_on_tail(
&mut err, expr, ty, e_ty, target_id,
);
+ let error = Some(Sorts(ExpectedFound { expected: ty, found: e_ty }));
+ self.annotate_loop_expected_due_to_inference(&mut err, expr, error);
if let Some(val) = ty_kind_suggestion(ty) {
- let label = destination
- .label
- .map(|l| format!(" {}", l.ident))
- .unwrap_or_else(String::new);
- err.span_suggestion(
- expr.span,
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
"give it a value of the expected type",
- format!("break{label} {val}"),
+ format!(" {val}"),
Applicability::HasPlaceholders,
);
}
@@ -711,7 +718,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// ... except when we try to 'break rust;'.
// ICE this expression in particular (see #43162).
if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
- if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
+ if let [segment] = path.segments && segment.ident.name == sym::rust {
fatally_break_rust(self.tcx);
}
}
@@ -1203,7 +1210,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
// otherwise check exactly as a let statement
self.check_decl(let_expr.into());
// but return a bool, for this is a boolean expression
- self.tcx.types.bool
+ if let Some(error_guaranteed) = let_expr.is_recovered {
+ self.set_tainted_by_errors(error_guaranteed);
+ Ty::new_error(self.tcx, error_guaranteed)
+ } else {
+ self.tcx.types.bool
+ }
}
fn check_expr_loop(
@@ -2310,13 +2322,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let guar = if field.name == kw::Empty {
self.tcx.sess.delay_span_bug(field.span, "field name with no name")
- } else if self.method_exists(
- field,
- base_ty,
- expr.hir_id,
- true,
- expected.only_has_type(self),
- ) {
+ } else if self.method_exists(field, base_ty, expr.hir_id, expected.only_has_type(self)) {
self.ban_take_value_of_method(expr, base_ty, field)
} else if !base_ty.is_primitive_ty() {
self.ban_nonexisting_field(field, base, expr, base_ty)
@@ -2501,7 +2507,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let mut err = self.private_field_err(field, base_did);
// Also check if an accessible method exists, which is often what is meant.
- if self.method_exists(field, expr_t, expr.hir_id, false, return_ty)
+ if self.method_exists(field, expr_t, expr.hir_id, return_ty)
&& !self.expr_in_place(expr.hir_id)
{
self.suggest_method_call(
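One small cleanup earlier in this file's diff (the `break rust;` easter-egg check) replaces a `len() == 1` test plus indexing with a one-element slice pattern. The same idiom in ordinary code, as a sketch:

```rust
fn single_segment<'a>(segments: &[&'a str]) -> Option<&'a str> {
    // A one-element slice pattern instead of checking `len() == 1`
    // and then indexing with `segments[0]`.
    if let [only] = segments { Some(*only) } else { None }
}

fn main() {
    assert_eq!(single_segment(&["rust"]), Some("rust"));
    assert_eq!(single_segment(&["break", "rust"]), None);
}
```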
diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
index 840910732..8bc66ac55 100644
--- a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
+++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs
@@ -664,10 +664,12 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
);
self.walk_pat(discr_place, arm.pat, arm.guard.is_some());
- if let Some(hir::Guard::If(e)) = arm.guard {
- self.consume_expr(e)
- } else if let Some(hir::Guard::IfLet(ref l)) = arm.guard {
- self.consume_expr(l.init)
+ match arm.guard {
+ Some(hir::Guard::If(ref e)) => self.consume_expr(e),
+ Some(hir::Guard::IfLet(ref l)) => {
+ self.walk_local(l.init, l.pat, None, |t| t.borrow_expr(l.init, ty::ImmBorrow))
+ }
+ None => {}
}
self.consume_expr(arm.body);

diff --git a/compiler/rustc_hir_typeck/src/fallback.rs b/compiler/rustc_hir_typeck/src/fallback.rs
index 5b5986a34..952b90d6a 100644
--- a/compiler/rustc_hir_typeck/src/fallback.rs
+++ b/compiler/rustc_hir_typeck/src/fallback.rs
@@ -4,6 +4,7 @@ use rustc_data_structures::{
graph::{iterate::DepthFirstSearch, vec_graph::VecGraph},
unord::{UnordBag, UnordMap, UnordSet},
};
+use rustc_infer::infer::{DefineOpaqueTypes, InferOk};
use rustc_middle::ty::{self, Ty};
impl<'tcx> FnCtxt<'_, 'tcx> {
@@ -23,20 +24,10 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
self.fulfillment_cx.borrow_mut().pending_obligations()
);
- // Check if we have any unsolved variables. If not, no need for fallback.
- let unsolved_variables = self.unsolved_variables();
- if unsolved_variables.is_empty() {
- return;
- }
+ let fallback_occured = self.fallback_types() | self.fallback_effects();
- let diverging_fallback = self.calculate_diverging_fallback(&unsolved_variables);
-
- // We do fallback in two passes, to try to generate
- // better error messages.
- // The first time, we do *not* replace opaque types.
- for ty in unsolved_variables {
- debug!("unsolved_variable = {:?}", ty);
- self.fallback_if_possible(ty, &diverging_fallback);
+ if !fallback_occured {
+ return;
}
// We now see if we can make progress. This might cause us to
@@ -65,6 +56,53 @@ impl<'tcx> FnCtxt<'_, 'tcx> {
self.select_obligations_where_possible(|_| {});
}
+ fn fallback_types(&self) -> bool {
+ // Check if we have any unsolved variables. If not, no need for fallback.
+ let unsolved_variables = self.unsolved_variables();
+
+ if unsolved_variables.is_empty() {
+ return false;
+ }
+
+ let diverging_fallback = self.calculate_diverging_fallback(&unsolved_variables);
+
+ // We do fallback in two passes, to try to generate
+ // better error messages.
+ // The first time, we do *not* replace opaque types.
+ for ty in unsolved_variables {
+ debug!("unsolved_variable = {:?}", ty);
+ self.fallback_if_possible(ty, &diverging_fallback);
+ }
+
+ true
+ }
+
+ fn fallback_effects(&self) -> bool {
+ let unsolved_effects = self.unsolved_effects();
+
+ if unsolved_effects.is_empty() {
+ return false;
+ }
+
+        // not setting `fallback_has_occurred` here because that field is only used for type fallback
+        // diagnostics.
+
+ for effect in unsolved_effects {
+ let expected = self.tcx.consts.true_;
+ let cause = self.misc(rustc_span::DUMMY_SP);
+ match self.at(&cause, self.param_env).eq(DefineOpaqueTypes::Yes, expected, effect) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ }
+ Err(e) => {
+ bug!("cannot eq unsolved effect: {e:?}")
+ }
+ }
+ }
+
+ true
+ }
+
// Tries to apply a fallback to `ty` if it is an unsolved variable.
//
// - Unconstrained ints are replaced with `i32`.
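`fallback_types` preserves the existing behaviour (unconstrained integer variables fall back to `i32`, as the comment above notes), while the new `fallback_effects` equates unsolved effect parameters with `tcx.consts.true_`. The type-fallback half is observable from ordinary code; a hedged sketch:

```rust
fn main() {
    // `x` is an integer inference variable with no other constraints, so type
    // fallback resolves it to `i32`; float literals similarly fall back to `f64`.
    let x = 3;
    let y = 2.5;
    assert_eq!(std::mem::size_of_val(&x), 4);
    assert_eq!(std::mem::size_of_val(&y), 8);
}
```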
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
index 28fe2e062..415920221 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -509,21 +509,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
typeck_results.rvalue_scopes = rvalue_scopes;
}
- pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) {
- if self.tcx.sess.opts.unstable_opts.drop_tracking_mir {
- self.save_generator_interior_predicates(def_id);
- return;
- }
-
- self.select_obligations_where_possible(|_| {});
-
- let mut generators = self.deferred_generator_interiors.borrow_mut();
- for (_, body_id, interior, kind) in generators.drain(..) {
- crate::generator_interior::resolve_interior(self, def_id, body_id, interior, kind);
- self.select_obligations_where_possible(|_| {});
- }
- }
-
/// Unify the inference variables corresponding to generator witnesses, and save all the
/// predicates that were stalled on those inference variables.
///
@@ -533,7 +518,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// We must not attempt to select obligations after this method has run, or risk query cycle
/// ICE.
#[instrument(level = "debug", skip(self))]
- fn save_generator_interior_predicates(&self, def_id: DefId) {
+ pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) {
// Try selecting all obligations that are not blocked on inference variables.
// Once we start unifying generator witnesses, trying to select obligations on them will
// trigger query cycle ICEs, as doing so requires MIR.
@@ -550,7 +535,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.tcx,
self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
);
- let witness = Ty::new_generator_witness_mir(self.tcx, expr_def_id.to_def_id(), args);
+ let witness = Ty::new_generator_witness(self.tcx, expr_def_id.to_def_id(), args);
// Unify `interior` with `witness` and collect all the resulting obligations.
let span = self.tcx.hir().body(body_id).value.span;
@@ -1295,17 +1280,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
(GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => {
self.fcx.ty_infer(Some(param), inf.span).into()
}
- (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => {
+ (
+ &GenericParamDefKind::Const { has_default, is_host_effect },
+ GenericArg::Infer(inf),
+ ) => {
let tcx = self.fcx.tcx();
- self.fcx
- .ct_infer(
- tcx.type_of(param.def_id)
- .no_bound_vars()
- .expect("const parameter types cannot be generic"),
- Some(param),
- inf.span,
- )
- .into()
+
+ if has_default && is_host_effect {
+ self.fcx.var_for_effect(param)
+ } else {
+ self.fcx
+ .ct_infer(
+ tcx.type_of(param.def_id)
+ .no_bound_vars()
+ .expect("const parameter types cannot be generic"),
+ Some(param),
+ inf.span,
+ )
+ .into()
+ }
}
_ => unreachable!(),
}
@@ -1324,7 +1317,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
GenericParamDefKind::Type { has_default, .. } => {
if !infer_args && has_default {
- // If we have a default, then we it doesn't matter that we're not
+ // If we have a default, then it doesn't matter that we're not
// inferring the type arguments: we provide the default where any
// is missing.
tcx.type_of(param.def_id).instantiate(tcx, args.unwrap()).into()
@@ -1336,17 +1329,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.fcx.var_for_def(self.span, param)
}
}
- GenericParamDefKind::Const { has_default } => {
- if !infer_args
- && has_default
- && !tcx.has_attr(param.def_id, sym::rustc_host)
- {
- tcx.const_param_default(param.def_id)
- .instantiate(tcx, args.unwrap())
- .into()
- } else {
- self.fcx.var_for_def(self.span, param)
+ GenericParamDefKind::Const { has_default, is_host_effect } => {
+ if has_default {
+ // N.B. this is a bit of a hack. `infer_args` is passed depending on
+ // whether the user has provided generic args. E.g. for `Vec::new`
+ // we would have to infer the generic types. However, for `Vec::<T>::new`
+ // where the allocator param `A` has a default we will *not* infer. But
+ // for effect params this is a different story: if the user has not written
+ // anything explicit for the effect param, we always need to try to infer
+ // it before falling back to default, such that a `const fn` such as
+ // `needs_drop::<()>` can still be called in const contexts. (if we defaulted
+ // instead of inferred, typeck would error)
+ if is_host_effect {
+ return self.fcx.var_for_effect(param);
+ } else if !infer_args {
+ return tcx
+ .const_param_default(param.def_id)
+ .instantiate(tcx, args.unwrap())
+ .into();
+ }
}
+
+ self.fcx.var_for_def(self.span, param)
}
}
}
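The comment in the hunk above separates ordinary defaulted generic parameters, which are only defaulted when explicit generic arguments were written (the `Vec::new` versus `Vec::<T>::new` case), from host-effect parameters, which are always inferred first. The non-effect half of that rule is visible from stable code; a hedged sketch:

```rust
fn main() {
    // No generic arguments written: everything, including `T`, is inferred from use.
    let mut inferred = Vec::new();
    inferred.push(1u8);

    // Turbofish written: `T` is pinned to `u8`, and the defaulted allocator
    // parameter is taken from its default rather than inferred.
    let explicit: Vec<u8> = Vec::<u8>::new();

    assert_eq!(inferred.len(), 1);
    assert!(explicit.is_empty());
}
```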
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
index c44d12e61..43d4496dd 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
@@ -1,6 +1,6 @@
use crate::FnCtxt;
use rustc_hir as hir;
-use rustc_hir::def::Res;
+use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_infer::{infer::type_variable::TypeVariableOriginKind, traits::ObligationCauseCode};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
@@ -20,10 +20,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
else {
return false;
};
- let hir = self.tcx.hir();
- let hir::Node::Expr(expr) = hir.get(hir_id) else {
- return false;
- };
let Some(unsubstituted_pred) = self
.tcx
@@ -37,15 +33,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let generics = self.tcx.generics_of(def_id);
- let predicate_args = match unsubstituted_pred.kind().skip_binder() {
- ty::ClauseKind::Trait(pred) => pred.trait_ref.args.to_vec(),
- ty::ClauseKind::Projection(pred) => pred.projection_ty.args.to_vec(),
- ty::ClauseKind::ConstArgHasType(arg, ty) => {
- vec![ty.into(), arg.into()]
- }
- ty::ClauseKind::ConstEvaluatable(e) => vec![e.into()],
- _ => return false,
- };
+ let (predicate_args, predicate_self_type_to_point_at) =
+ match unsubstituted_pred.kind().skip_binder() {
+ ty::ClauseKind::Trait(pred) => {
+ (pred.trait_ref.args.to_vec(), Some(pred.self_ty().into()))
+ }
+ ty::ClauseKind::Projection(pred) => (pred.projection_ty.args.to_vec(), None),
+ ty::ClauseKind::ConstArgHasType(arg, ty) => (vec![ty.into(), arg.into()], None),
+ ty::ClauseKind::ConstEvaluatable(e) => (vec![e.into()], None),
+ _ => return false,
+ };
let find_param_matching = |matches: &dyn Fn(ty::ParamTerm) -> bool| {
predicate_args.iter().find_map(|arg| {
@@ -96,55 +93,92 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
self.find_ambiguous_parameter_in(def_id, error.root_obligation.predicate);
}
- if self.closure_span_overlaps_error(error, expr.span) {
- return false;
- }
+ let hir = self.tcx.hir();
+ let (expr, qpath) = match hir.get(hir_id) {
+ hir::Node::Expr(expr) => {
+ if self.closure_span_overlaps_error(error, expr.span) {
+ return false;
+ }
+ let qpath =
+ if let hir::ExprKind::Path(qpath) = expr.kind { Some(qpath) } else { None };
- match &expr.kind {
- hir::ExprKind::Path(qpath) => {
- if let hir::Node::Expr(hir::Expr {
- kind: hir::ExprKind::Call(callee, args),
- hir_id: call_hir_id,
- span: call_span,
- ..
- }) = hir.get_parent(expr.hir_id)
- && callee.hir_id == expr.hir_id
- {
- if self.closure_span_overlaps_error(error, *call_span) {
- return false;
- }
+ (Some(&expr.kind), qpath)
+ }
+ hir::Node::Ty(hir::Ty { kind: hir::TyKind::Path(qpath), .. }) => (None, Some(*qpath)),
+ _ => return false,
+ };
- for param in
- [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
- .into_iter()
- .flatten()
+ if let Some(qpath) = qpath {
+ // Prefer pointing at the turbofished arg that corresponds to the
+ // self type of the failing predicate over anything else.
+ if let Some(param) = predicate_self_type_to_point_at
+ && self.point_at_path_if_possible(error, def_id, param, &qpath)
+ {
+ return true;
+ }
+
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Call(callee, args),
+ hir_id: call_hir_id,
+ span: call_span,
+ ..
+ }) = hir.get_parent(hir_id)
+ && callee.hir_id == hir_id
+ {
+ if self.closure_span_overlaps_error(error, *call_span) {
+ return false;
+ }
+
+ for param in
+ [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.blame_specific_arg_if_possible(
+ error,
+ def_id,
+ param,
+ *call_hir_id,
+ callee.span,
+ None,
+ args,
+ )
{
- if self.blame_specific_arg_if_possible(
- error,
- def_id,
- param,
- *call_hir_id,
- callee.span,
- None,
- args,
- )
- {
- return true;
- }
+ return true;
}
}
- // Notably, we only point to params that are local to the
- // item we're checking, since those are the ones we are able
- // to look in the final `hir::PathSegment` for. Everything else
- // would require a deeper search into the `qpath` than I think
- // is worthwhile.
- if let Some(param_to_point_at) = param_to_point_at
- && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
- {
+ }
+
+ for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
+ .into_iter()
+ .flatten()
+ {
+ if self.point_at_path_if_possible(error, def_id, param, &qpath) {
return true;
}
}
- hir::ExprKind::MethodCall(segment, receiver, args, ..) => {
+ }
+
+ match expr {
+ Some(hir::ExprKind::MethodCall(segment, receiver, args, ..)) => {
+ if let Some(param) = predicate_self_type_to_point_at
+ && self.point_at_generic_if_possible(error, def_id, param, segment)
+ {
+ // HACK: This is not correct, since `predicate_self_type_to_point_at` might
+ // not actually correspond to the receiver of the method call. But we
+ // re-adjust the cause code here in order to prefer pointing at one of
+ // the method's turbofish segments but still use `FunctionArgumentObligation`
+ // elsewhere. Hopefully this doesn't break something.
+ error.obligation.cause.map_code(|parent_code| {
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id: receiver.hir_id,
+ call_hir_id: hir_id,
+ parent_code,
+ }
+ });
+ return true;
+ }
+
for param in [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
.into_iter()
.flatten()
@@ -166,12 +200,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
{
return true;
}
+ // Handle `Self` param specifically, since it's separated in
+ // the method call representation
+ if self_param_to_point_at.is_some() {
+ error.obligation.cause.span = receiver
+ .span
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(receiver.span);
+ return true;
+ }
}
- hir::ExprKind::Struct(qpath, fields, ..) => {
- if let Res::Def(
- hir::def::DefKind::Struct | hir::def::DefKind::Variant,
- variant_def_id,
- ) = self.typeck_results.borrow().qpath_res(qpath, hir_id)
+ Some(hir::ExprKind::Struct(qpath, fields, ..)) => {
+ if let Res::Def(DefKind::Struct | DefKind::Variant, variant_def_id) =
+ self.typeck_results.borrow().qpath_res(qpath, hir_id)
{
for param in
[param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
@@ -193,10 +234,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
}
}
- if let Some(param_to_point_at) = param_to_point_at
- && self.point_at_path_if_possible(error, def_id, param_to_point_at, qpath)
+
+ for param in [
+ predicate_self_type_to_point_at,
+ param_to_point_at,
+ fallback_param_to_point_at,
+ self_param_to_point_at,
+ ]
+ .into_iter()
+ .flatten()
{
- return true;
+ if self.point_at_path_if_possible(error, def_id, param, qpath) {
+ return true;
+ }
}
}
_ => {}
@@ -213,17 +263,43 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
qpath: &hir::QPath<'tcx>,
) -> bool {
match qpath {
- hir::QPath::Resolved(_, path) => {
- if let Some(segment) = path.segments.last()
- && self.point_at_generic_if_possible(error, def_id, param, segment)
+ hir::QPath::Resolved(self_ty, path) => {
+ for segment in path.segments.iter().rev() {
+ if let Res::Def(kind, def_id) = segment.res
+ && !matches!(kind, DefKind::Mod | DefKind::ForeignMod)
+ && self.point_at_generic_if_possible(error, def_id, param, segment)
+ {
+ return true;
+ }
+ }
+ // Handle `Self` param specifically, since it's separated in
+ // the path representation
+ if let Some(self_ty) = self_ty
+ && let ty::GenericArgKind::Type(ty) = param.unpack()
+ && ty == self.tcx.types.self_param
{
+ error.obligation.cause.span = self_ty
+ .span
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(self_ty.span);
return true;
}
}
- hir::QPath::TypeRelative(_, segment) => {
+ hir::QPath::TypeRelative(self_ty, segment) => {
if self.point_at_generic_if_possible(error, def_id, param, segment) {
return true;
}
+ // Handle `Self` param specifically, since it's separated in
+ // the path representation
+ if let ty::GenericArgKind::Type(ty) = param.unpack()
+ && ty == self.tcx.types.self_param
+ {
+ error.obligation.cause.span = self_ty
+ .span
+ .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+ .unwrap_or(self_ty.span);
+ return true;
+ }
}
_ => {}
}
@@ -398,7 +474,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
/**
- * Recursively searches for the most-specific blamable expression.
+ * Recursively searches for the most-specific blameable expression.
* For example, if you have a chain of constraints like:
* - want `Vec<i32>: Copy`
* - because `Option<Vec<i32>>: Copy` needs `Vec<i32>: Copy` because `impl <T: Copy> Copy for Option<T>`
@@ -618,14 +694,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let variant_def_id = match expr_struct_def_kind {
- hir::def::DefKind::Struct => {
+ DefKind::Struct => {
if in_ty_adt.did() != expr_struct_def_id {
// FIXME: Deal with type aliases?
return Err(expr);
}
expr_struct_def_id
}
- hir::def::DefKind::Variant => {
+ DefKind::Variant => {
// If this is a variant, its parent is the type definition.
if in_ty_adt.did() != self.tcx.parent(expr_struct_def_id) {
// FIXME: Deal with type aliases?
@@ -727,14 +803,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
};
let variant_def_id = match expr_struct_def_kind {
- hir::def::DefKind::Ctor(hir::def::CtorOf::Struct, hir::def::CtorKind::Fn) => {
+ DefKind::Ctor(hir::def::CtorOf::Struct, hir::def::CtorKind::Fn) => {
if in_ty_adt.did() != self.tcx.parent(expr_ctor_def_id) {
// FIXME: Deal with type aliases?
return Err(expr);
}
self.tcx.parent(expr_ctor_def_id)
}
- hir::def::DefKind::Ctor(hir::def::CtorOf::Variant, hir::def::CtorKind::Fn) => {
+ DefKind::Ctor(hir::def::CtorOf::Variant, hir::def::CtorKind::Fn) => {
// For a typical enum like
// `enum Blah<T> { Variant(T) }`
// we get the following resolutions:
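The reworked blame logic prefers pointing at the turbofished argument that is the self type of the failing predicate, and handles `Self` and path-only (`hir::Ty`) positions that previously fell through. A hedged sketch of the kind of call site affected, with the failing call left commented out so the example still compiles (names illustrative only):

```rust
fn make<T: Default>() -> T {
    T::default()
}

struct NoDefault;

fn main() {
    let _ok: i32 = make::<i32>();

    // Uncommenting the next line yields `E0277: NoDefault: Default`; with this
    // change the diagnostic prefers to point at the `NoDefault` turbofish
    // argument rather than at the whole call expression.
    // let _bad = make::<NoDefault>();
    let _ = NoDefault;
}
```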
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 4def78673..c0332a48b 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -273,11 +273,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
//
// This check is here because there is currently no way to express a trait bound for `FnDef` types only.
if is_const_eval_select && (1..=2).contains(&idx) {
- if let ty::FnDef(def_id, _) = checked_ty.kind() {
- if idx == 1 && !self.tcx.is_const_fn_raw(*def_id) {
- self.tcx
- .sess
- .emit_err(errors::ConstSelectMustBeConst { span: provided_arg.span });
+ if let ty::FnDef(def_id, args) = *checked_ty.kind() {
+ if idx == 1 {
+ if !self.tcx.is_const_fn_raw(def_id) {
+ self.tcx.sess.emit_err(errors::ConstSelectMustBeConst {
+ span: provided_arg.span,
+ });
+ } else {
+ self.enforce_context_effects(
+ provided_arg.hir_id,
+ provided_arg.span,
+ def_id,
+ args,
+ )
+ }
}
} else {
self.tcx.sess.emit_err(errors::ConstSelectMustBeFn {
@@ -1361,10 +1370,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
_ => bug!("unexpected type: {:?}", ty.normalized),
},
- Res::Def(
- DefKind::Struct | DefKind::Union | DefKind::TyAlias { .. } | DefKind::AssocTy,
- _,
- )
+ Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. } => match ty.normalized.ty_adt_def() {
Some(adt) if !adt.is_enum() => {
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
index 6a82b0021..4a245d30c 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
@@ -266,7 +266,14 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
param: Option<&ty::GenericParamDef>,
span: Span,
) -> Const<'tcx> {
+ // FIXME ideally this shouldn't use unwrap
match param {
+ Some(
+ param @ ty::GenericParamDef {
+ kind: ty::GenericParamDefKind::Const { is_host_effect: true, .. },
+ ..
+ },
+ ) => self.var_for_effect(param).as_const().unwrap(),
Some(param) => self.var_for_def(span, param).as_const().unwrap(),
None => self.next_const_var(
ty,
@@ -317,7 +324,21 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span) {
// FIXME: normalization and escaping regions
- let ty = if !ty.has_escaping_bound_vars() { self.normalize(span, ty) } else { ty };
+ let ty = if !ty.has_escaping_bound_vars() {
+ // NOTE: These obligations are 100% redundant and are implied by
+ // WF obligations that are registered elsewhere, but they have a
+ // better cause code assigned to them in `add_required_obligations_for_hir`.
+ // This means that they should shadow obligations with worse spans.
+ if let ty::Alias(ty::Projection | ty::Weak, ty::AliasTy { args, def_id, .. }) =
+ ty.kind()
+ {
+ self.add_required_obligations_for_hir(span, *def_id, args, hir_id);
+ }
+
+ self.normalize(span, ty)
+ } else {
+ ty
+ };
self.write_ty(hir_id, ty)
}
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
index d2a53ee8b..abb689892 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs
@@ -65,6 +65,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let expr = expr.peel_drop_temps();
self.suggest_missing_semicolon(err, expr, expected, false);
let mut pointing_at_return_type = false;
+ if let hir::ExprKind::Break(..) = expr.kind {
+ // `break` type mismatches provide better context for tail `loop` expressions.
+ return false;
+ }
if let Some((fn_id, fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
pointing_at_return_type = self.suggest_missing_return_type(
err,
@@ -987,10 +991,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let bound_vars = self.tcx.late_bound_vars(fn_id);
let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars));
let ty = match self.tcx.asyncness(fn_id.owner) {
- hir::IsAsync::Async => self.get_impl_future_output_ty(ty).unwrap_or_else(|| {
+ ty::Asyncness::Yes => self.get_impl_future_output_ty(ty).unwrap_or_else(|| {
span_bug!(fn_decl.output.span(), "failed to get output type of async function")
}),
- hir::IsAsync::NotAsync => ty,
+ ty::Asyncness::No => ty,
};
let ty = self.normalize(expr.span, ty);
if self.can_coerce(found, ty) {
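The suggestion code above now reads asyncness as `ty::Asyncness` and, for `async` functions, recovers the declared output type from the `impl Future` the function really returns. A hedged, user-level illustration of that relationship (names illustrative; nothing here is rustc-internal API):

```rust
async fn answer() -> u32 {
    42
}

fn main() {
    // To the type system, `answer` returns `impl Future<Output = u32>`; the
    // suggestion machinery recovers the declared `u32` from that opaque type.
    let _fut = answer(); // driving the future would need an executor, omitted here
}
```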
diff --git a/compiler/rustc_hir_typeck/src/gather_locals.rs b/compiler/rustc_hir_typeck/src/gather_locals.rs
index ed4c63f17..0ad2c1d92 100644
--- a/compiler/rustc_hir_typeck/src/gather_locals.rs
+++ b/compiler/rustc_hir_typeck/src/gather_locals.rs
@@ -50,7 +50,7 @@ impl<'a> From<&'a hir::Local<'a>> for Declaration<'a> {
impl<'a> From<&'a hir::Let<'a>> for Declaration<'a> {
fn from(let_expr: &'a hir::Let<'a>) -> Self {
- let hir::Let { hir_id, pat, ty, span, init } = *let_expr;
+ let hir::Let { hir_id, pat, ty, span, init, is_recovered: _ } = *let_expr;
Declaration { hir_id, pat, ty, span, init: Some(init), origin: DeclOrigin::LetExpr }
}
}
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
deleted file mode 100644
index cfedcee99..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_build.rs
+++ /dev/null
@@ -1,601 +0,0 @@
-use super::{
- for_each_consumable, record_consumed_borrow::ConsumedAndBorrowedPlaces, DropRangesBuilder,
- NodeInfo, PostOrderId, TrackedValue, TrackedValueIndex,
-};
-use hir::{
- intravisit::{self, Visitor},
- Body, Expr, ExprKind, Guard, HirId, LoopIdError,
-};
-use rustc_data_structures::unord::{UnordMap, UnordSet};
-use rustc_hir as hir;
-use rustc_index::IndexVec;
-use rustc_infer::infer::InferCtxt;
-use rustc_middle::{
- hir::map::Map,
- ty::{ParamEnv, TyCtxt, TypeVisitableExt, TypeckResults},
-};
-use std::mem::swap;
-
-/// Traverses the body to find the control flow graph and locations for the
-/// relevant places are dropped or reinitialized.
-///
-/// The resulting structure still needs to be iterated to a fixed point, which
-/// can be done with propagate_to_fixpoint in cfg_propagate.
-pub(super) fn build_control_flow_graph<'tcx>(
- infcx: &InferCtxt<'tcx>,
- typeck_results: &TypeckResults<'tcx>,
- param_env: ParamEnv<'tcx>,
- consumed_borrowed_places: ConsumedAndBorrowedPlaces,
- body: &'tcx Body<'tcx>,
- num_exprs: usize,
-) -> (DropRangesBuilder, UnordSet<HirId>) {
- let mut drop_range_visitor = DropRangeVisitor::new(
- infcx,
- typeck_results,
- param_env,
- consumed_borrowed_places,
- num_exprs,
- );
- intravisit::walk_body(&mut drop_range_visitor, body);
-
- drop_range_visitor.drop_ranges.process_deferred_edges();
- if let Some(filename) = &infcx.tcx.sess.opts.unstable_opts.dump_drop_tracking_cfg {
- super::cfg_visualize::write_graph_to_file(
- &drop_range_visitor.drop_ranges,
- filename,
- infcx.tcx,
- );
- }
-
- (drop_range_visitor.drop_ranges, drop_range_visitor.places.borrowed_temporaries)
-}
-
-/// This struct is used to gather the information for `DropRanges` to determine the regions of the
-/// HIR tree for which a value is dropped.
-///
-/// We are interested in points where a variables is dropped or initialized, and the control flow
-/// of the code. We identify locations in code by their post-order traversal index, so it is
-/// important for this traversal to match that in `RegionResolutionVisitor` and `InteriorVisitor`.
-///
-/// We make several simplifying assumptions, with the goal of being more conservative than
-/// necessary rather than less conservative (since being less conservative is unsound, but more
-/// conservative is still safe). These assumptions are:
-///
-/// 1. Moving a variable `a` counts as a move of the whole variable.
-/// 2. Moving a partial path like `a.b.c` is ignored.
-/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counts as a reinitialization of all of
-/// `a`.
-///
-/// Some examples:
-///
-/// Rule 1:
-/// ```rust
-/// let mut a = (vec![0], vec![0]);
-/// drop(a);
-/// // `a` is not considered initialized.
-/// ```
-///
-/// Rule 2:
-/// ```rust
-/// let mut a = (vec![0], vec![0]);
-/// drop(a.0);
-/// drop(a.1);
-/// // `a` is still considered initialized.
-/// ```
-///
-/// Rule 3:
-/// ```compile_fail,E0382
-/// let mut a = (vec![0], vec![0]);
-/// drop(a);
-/// a.1 = vec![1];
-/// // all of `a` is considered initialized
-/// ```
-
-struct DropRangeVisitor<'a, 'tcx> {
- typeck_results: &'a TypeckResults<'tcx>,
- infcx: &'a InferCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- places: ConsumedAndBorrowedPlaces,
- drop_ranges: DropRangesBuilder,
- expr_index: PostOrderId,
- label_stack: Vec<(Option<rustc_ast::Label>, PostOrderId)>,
-}
-
-impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> {
- fn new(
- infcx: &'a InferCtxt<'tcx>,
- typeck_results: &'a TypeckResults<'tcx>,
- param_env: ParamEnv<'tcx>,
- places: ConsumedAndBorrowedPlaces,
- num_exprs: usize,
- ) -> Self {
- debug!("consumed_places: {:?}", places.consumed);
- let drop_ranges = DropRangesBuilder::new(
- places.consumed.iter().flat_map(|(_, places)| places.iter().cloned()),
- infcx.tcx.hir(),
- num_exprs,
- );
- Self {
- infcx,
- typeck_results,
- param_env,
- places,
- drop_ranges,
- expr_index: PostOrderId::from_u32(0),
- label_stack: vec![],
- }
- }
-
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.infcx.tcx
- }
-
- fn record_drop(&mut self, value: TrackedValue) {
- if self.places.borrowed.contains(&value) {
- debug!("not marking {:?} as dropped because it is borrowed at some point", value);
- } else {
- debug!("marking {:?} as dropped at {:?}", value, self.expr_index);
- let count = self.expr_index;
- self.drop_ranges.drop_at(value, count);
- }
- }
-
- /// ExprUseVisitor's consume callback doesn't go deep enough for our purposes in all
- /// expressions. This method consumes a little deeper into the expression when needed.
- fn consume_expr(&mut self, expr: &hir::Expr<'_>) {
- debug!("consuming expr {:?}, count={:?}", expr.kind, self.expr_index);
- let places = self
- .places
- .consumed
- .get(&expr.hir_id)
- .map_or(vec![], |places| places.iter().cloned().collect());
- for place in places {
- trace!(?place, "consuming place");
- for_each_consumable(self.tcx().hir(), place, |value| self.record_drop(value));
- }
- }
-
- /// Marks an expression as being reinitialized.
- ///
- /// Note that we always approximated on the side of things being more
- /// initialized than they actually are, as opposed to less. In cases such
- /// as `x.y = ...`, we would consider all of `x` as being initialized
- /// instead of just the `y` field.
- ///
- /// This is because it is always safe to consider something initialized
- /// even when it is not, but the other way around will cause problems.
- ///
- /// In the future, we will hopefully tighten up these rules to be more
- /// precise.
- fn reinit_expr(&mut self, expr: &hir::Expr<'_>) {
- // Walk the expression to find the base. For example, in an expression
- // like `*a[i].x`, we want to find the `a` and mark that as
- // reinitialized.
- match expr.kind {
- ExprKind::Path(hir::QPath::Resolved(
- _,
- hir::Path { res: hir::def::Res::Local(hir_id), .. },
- )) => {
- // This is the base case, where we have found an actual named variable.
-
- let location = self.expr_index;
- debug!("reinitializing {:?} at {:?}", hir_id, location);
- self.drop_ranges.reinit_at(TrackedValue::Variable(*hir_id), location);
- }
-
- ExprKind::Field(base, _) => self.reinit_expr(base),
-
- // Most expressions do not refer to something where we need to track
- // reinitializations.
- //
- // Some of these may be interesting in the future
- ExprKind::Path(..)
- | ExprKind::ConstBlock(..)
- | ExprKind::Array(..)
- | ExprKind::Call(..)
- | ExprKind::MethodCall(..)
- | ExprKind::Tup(..)
- | ExprKind::Binary(..)
- | ExprKind::Unary(..)
- | ExprKind::Lit(..)
- | ExprKind::Cast(..)
- | ExprKind::Type(..)
- | ExprKind::DropTemps(..)
- | ExprKind::Let(..)
- | ExprKind::If(..)
- | ExprKind::Loop(..)
- | ExprKind::Match(..)
- | ExprKind::Closure { .. }
- | ExprKind::Block(..)
- | ExprKind::Assign(..)
- | ExprKind::AssignOp(..)
- | ExprKind::Index(..)
- | ExprKind::AddrOf(..)
- | ExprKind::Break(..)
- | ExprKind::Continue(..)
- | ExprKind::Ret(..)
- | ExprKind::Become(..)
- | ExprKind::InlineAsm(..)
- | ExprKind::OffsetOf(..)
- | ExprKind::Struct(..)
- | ExprKind::Repeat(..)
- | ExprKind::Yield(..)
- | ExprKind::Err(_) => (),
- }
- }
-
- /// For an expression with an uninhabited return type (e.g. a function that returns !),
- /// this adds a self edge to the CFG to model the fact that the function does not
- /// return.
- fn handle_uninhabited_return(&mut self, expr: &Expr<'tcx>) {
- let ty = self.typeck_results.expr_ty(expr);
- let ty = self.infcx.resolve_vars_if_possible(ty);
- if ty.has_non_region_infer() {
- self.tcx()
- .sess
- .delay_span_bug(expr.span, format!("could not resolve infer vars in `{ty}`"));
- return;
- }
- let ty = self.tcx().erase_regions(ty);
- let m = self.tcx().parent_module(expr.hir_id).to_def_id();
- if !ty.is_inhabited_from(self.tcx(), m, self.param_env) {
- // This function will not return. We model this fact as an infinite loop.
- self.drop_ranges.add_control_edge(self.expr_index + 1, self.expr_index + 1);
- }
- }
-
- /// Map a Destination to an equivalent expression node
- ///
- /// The destination field of a Break or Continue expression can target either an
- /// expression or a block. The drop range analysis, however, only deals in
- /// expression nodes, so blocks that might be the destination of a Break or Continue
- /// will not have a PostOrderId.
- ///
- /// If the destination is an expression, this function will simply return that expression's
- /// hir_id. If the destination is a block, this function will return the hir_id of last
- /// expression in the block.
- fn find_target_expression_from_destination(
- &self,
- destination: hir::Destination,
- ) -> Result<HirId, LoopIdError> {
- destination.target_id.map(|target| {
- let node = self.tcx().hir().get(target);
- match node {
- hir::Node::Expr(_) => target,
- hir::Node::Block(b) => find_last_block_expression(b),
- hir::Node::Param(..)
- | hir::Node::Item(..)
- | hir::Node::ForeignItem(..)
- | hir::Node::TraitItem(..)
- | hir::Node::ImplItem(..)
- | hir::Node::Variant(..)
- | hir::Node::Field(..)
- | hir::Node::AnonConst(..)
- | hir::Node::ConstBlock(..)
- | hir::Node::Stmt(..)
- | hir::Node::PathSegment(..)
- | hir::Node::Ty(..)
- | hir::Node::TypeBinding(..)
- | hir::Node::TraitRef(..)
- | hir::Node::Pat(..)
- | hir::Node::PatField(..)
- | hir::Node::ExprField(..)
- | hir::Node::Arm(..)
- | hir::Node::Local(..)
- | hir::Node::Ctor(..)
- | hir::Node::Lifetime(..)
- | hir::Node::GenericParam(..)
- | hir::Node::Crate(..)
- | hir::Node::Infer(..) => bug!("Unsupported branch target: {:?}", node),
- }
- })
- }
-}
-
-fn find_last_block_expression(block: &hir::Block<'_>) -> HirId {
- block.expr.map_or_else(
- // If there is no tail expression, there will be at least one statement in the
- // block because the block contains a break or continue statement.
- || block.stmts.last().unwrap().hir_id,
- |expr| expr.hir_id,
- )
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> {
- fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
- let mut reinit = None;
- match expr.kind {
- ExprKind::Assign(lhs, rhs, _) => {
- self.visit_expr(rhs);
- self.visit_expr(lhs);
-
- reinit = Some(lhs);
- }
-
- ExprKind::If(test, if_true, if_false) => {
- self.visit_expr(test);
-
- let fork = self.expr_index;
-
- self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
- self.visit_expr(if_true);
- let true_end = self.expr_index;
-
- self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
- if let Some(if_false) = if_false {
- self.visit_expr(if_false);
- }
-
- self.drop_ranges.add_control_edge(true_end, self.expr_index + 1);
- }
- ExprKind::Match(scrutinee, arms, ..) => {
- // We walk through the match expression almost like a chain of if expressions.
- // Here's a diagram to follow along with:
- //
- // ┌─┐
- // match │A│ {
- // ┌───┴─┘
- // │
- // ┌▼┌───►┌─┐ ┌─┐
- // │B│ if │C│ =>│D│,
- // └─┘ ├─┴──►└─┴──────┐
- // ┌──┘ │
- // ┌──┘ │
- // │ │
- // ┌▼┌───►┌─┐ ┌─┐ │
- // │E│ if │F│ =>│G│, │
- // └─┘ ├─┴──►└─┴┐ │
- // │ │ │
- // } ▼ ▼ │
- // ┌─┐◄───────────────────┘
- // │H│
- // └─┘
- //
- // The order we want is that the scrutinee (A) flows into the first pattern (B),
- // which flows into the guard (C). Then the guard either flows into the arm body
- // (D) or into the start of the next arm (E). Finally, the body flows to the end
- // of the match block (H).
- //
- // The subsequent arms follow the same ordering. First we go to the pattern, then
- // the guard (if present, otherwise it flows straight into the body), then into
- // the body and then to the end of the match expression.
- //
- // The comments below show which edge is being added.
- self.visit_expr(scrutinee);
-
- let (guard_exit, arm_end_ids) = arms.iter().fold(
- (self.expr_index, vec![]),
- |(incoming_edge, mut arm_end_ids), hir::Arm { pat, body, guard, .. }| {
- // A -> B, or C -> E
- self.drop_ranges.add_control_edge(incoming_edge, self.expr_index + 1);
- self.visit_pat(pat);
- // B -> C and E -> F are added implicitly due to the traversal order.
- match guard {
- Some(Guard::If(expr)) => self.visit_expr(expr),
- Some(Guard::IfLet(let_expr)) => {
- self.visit_let_expr(let_expr);
- }
- None => (),
- }
- // Likewise, C -> D and F -> G are added implicitly.
-
- // Save C, F, so we can add the other outgoing edge.
- let to_next_arm = self.expr_index;
-
-                        // The default fall-through edge is not added when a node already has an
-                        // explicit outgoing edge, so we need to add the edge to the next node ourselves.
- //
- // This adds C -> D, F -> G
- self.drop_ranges.add_control_edge(self.expr_index, self.expr_index + 1);
- self.visit_expr(body);
-
- // Save the end of the body so we can add the exit edge once we know where
- // the exit is.
- arm_end_ids.push(self.expr_index);
-
- // Pass C to the next iteration, as well as vec![D]
- //
- // On the last round through, we pass F and vec![D, G] so that we can
- // add all the exit edges.
- (to_next_arm, arm_end_ids)
- },
- );
- // F -> H
- self.drop_ranges.add_control_edge(guard_exit, self.expr_index + 1);
-
- arm_end_ids.into_iter().for_each(|arm_end| {
- // D -> H, G -> H
- self.drop_ranges.add_control_edge(arm_end, self.expr_index + 1)
- });
- }
-
- ExprKind::Loop(body, label, ..) => {
- let loop_begin = self.expr_index + 1;
- self.label_stack.push((label, loop_begin));
- if body.stmts.is_empty() && body.expr.is_none() {
- // For empty loops we won't have updated self.expr_index after visiting the
- // body, meaning we'd get an edge from expr_index to expr_index + 1, but
- // instead we want an edge from expr_index + 1 to expr_index + 1.
- self.drop_ranges.add_control_edge(loop_begin, loop_begin);
- } else {
- self.visit_block(body);
- self.drop_ranges.add_control_edge(self.expr_index, loop_begin);
- }
- self.label_stack.pop();
- }
- // Find the loop entry by searching through the label stack for either the last entry
- // (if label is none), or the first entry where the label matches this one. The Loop
- // case maintains this stack mapping labels to the PostOrderId for the loop entry.
- ExprKind::Continue(hir::Destination { label, .. }, ..) => self
- .label_stack
- .iter()
- .rev()
- .find(|(loop_label, _)| label.is_none() || *loop_label == label)
- .map_or((), |(_, target)| {
- self.drop_ranges.add_control_edge(self.expr_index, *target)
- }),
-
- ExprKind::Break(destination, value) => {
-                // destination either points to an expression or to a block. We use
-                // find_target_expression_from_destination to resolve a block destination to the
-                // last expression in that block.
- //
- // We add an edge to the hir_id of the expression/block we are breaking out of, and
- // then in process_deferred_edges we will map this hir_id to its PostOrderId, which
- // will refer to the end of the block due to the post order traversal.
- if let Ok(target) = self.find_target_expression_from_destination(destination) {
- self.drop_ranges.add_control_edge_hir_id(self.expr_index, target)
- }
-
- if let Some(value) = value {
- self.visit_expr(value);
- }
- }
-
- ExprKind::Become(_call) => bug!("encountered a tail-call inside a generator"),
-
- ExprKind::Call(f, args) => {
- self.visit_expr(f);
- for arg in args {
- self.visit_expr(arg);
- }
-
- self.handle_uninhabited_return(expr);
- }
- ExprKind::MethodCall(_, receiver, exprs, _) => {
- self.visit_expr(receiver);
- for expr in exprs {
- self.visit_expr(expr);
- }
-
- self.handle_uninhabited_return(expr);
- }
-
- ExprKind::AddrOf(..)
- | ExprKind::Array(..)
- // FIXME(eholk): We probably need special handling for AssignOps. The ScopeTree builder
- // in region.rs runs both lhs then rhs and rhs then lhs and then sets all yields to be
- // the latest they show up in either traversal. With the older scope-based
-            // approximation, this was fine, but it is probably no longer right. What we probably want
- // to do instead is still run both orders, but consider anything that showed up as a
- // yield in either order.
- | ExprKind::AssignOp(..)
- | ExprKind::Binary(..)
- | ExprKind::Block(..)
- | ExprKind::Cast(..)
- | ExprKind::Closure { .. }
- | ExprKind::ConstBlock(..)
- | ExprKind::DropTemps(..)
- | ExprKind::Err(_)
- | ExprKind::Field(..)
- | ExprKind::Index(..)
- | ExprKind::InlineAsm(..)
- | ExprKind::OffsetOf(..)
- | ExprKind::Let(..)
- | ExprKind::Lit(..)
- | ExprKind::Path(..)
- | ExprKind::Repeat(..)
- | ExprKind::Ret(..)
- | ExprKind::Struct(..)
- | ExprKind::Tup(..)
- | ExprKind::Type(..)
- | ExprKind::Unary(..)
- | ExprKind::Yield(..) => intravisit::walk_expr(self, expr),
- }
-
- self.expr_index = self.expr_index + 1;
- self.drop_ranges.add_node_mapping(expr.hir_id, self.expr_index);
- self.consume_expr(expr);
- if let Some(expr) = reinit {
- self.reinit_expr(expr);
- }
- }
-
- fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
- intravisit::walk_pat(self, pat);
-
- // Increment expr_count here to match what InteriorVisitor expects.
- self.expr_index = self.expr_index + 1;
-
- // Save a node mapping to get better CFG visualization
- self.drop_ranges.add_node_mapping(pat.hir_id, self.expr_index);
- }
-}
-
-impl DropRangesBuilder {
- fn new(
- tracked_values: impl Iterator<Item = TrackedValue>,
- hir: Map<'_>,
- num_exprs: usize,
- ) -> Self {
- let mut tracked_value_map = UnordMap::<_, TrackedValueIndex>::default();
- let mut next = <_>::from(0u32);
- for value in tracked_values {
- for_each_consumable(hir, value, |value| {
- if let std::collections::hash_map::Entry::Vacant(e) = tracked_value_map.entry(value)
- {
- e.insert(next);
- next = next + 1;
- }
- });
- }
- debug!("hir_id_map: {:#?}", tracked_value_map);
- let num_values = tracked_value_map.len();
- Self {
- tracked_value_map,
- nodes: IndexVec::from_fn_n(|_| NodeInfo::new(num_values), num_exprs + 1),
- deferred_edges: <_>::default(),
- post_order_map: <_>::default(),
- }
- }
-
- fn tracked_value_index(&self, tracked_value: TrackedValue) -> TrackedValueIndex {
- *self.tracked_value_map.get(&tracked_value).unwrap()
- }
-
- /// Adds an entry in the mapping from HirIds to PostOrderIds
- ///
- /// Needed so that `add_control_edge_hir_id` can work.
- fn add_node_mapping(&mut self, node_hir_id: HirId, post_order_id: PostOrderId) {
- self.post_order_map.insert(node_hir_id, post_order_id);
- }
-
- /// Like add_control_edge, but uses a hir_id as the target.
- ///
- /// This can be used for branches where we do not know the PostOrderId of the target yet,
- /// such as when handling `break` or `continue`.
- fn add_control_edge_hir_id(&mut self, from: PostOrderId, to: HirId) {
- self.deferred_edges.push((from, to));
- }
-
- fn drop_at(&mut self, value: TrackedValue, location: PostOrderId) {
- let value = self.tracked_value_index(value);
- self.node_mut(location).drops.push(value);
- }
-
- fn reinit_at(&mut self, value: TrackedValue, location: PostOrderId) {
- let value = match self.tracked_value_map.get(&value) {
- Some(value) => *value,
- // If there's no value, this is never consumed and therefore is never dropped. We can
- // ignore this.
- None => return,
- };
- self.node_mut(location).reinits.push(value);
- }
-
- /// Looks up PostOrderId for any control edges added by HirId and adds a proper edge for them.
- ///
- /// Should be called after visiting the HIR but before solving the control flow, otherwise some
- /// edges will be missed.
- fn process_deferred_edges(&mut self) {
- trace!("processing deferred edges. post_order_map={:#?}", self.post_order_map);
- let mut edges = vec![];
- swap(&mut edges, &mut self.deferred_edges);
- edges.into_iter().for_each(|(from, to)| {
- trace!("Adding deferred edge from {:?} to {:?}", from, to);
- let to = *self.post_order_map.get(&to).expect("Expression ID not found");
- trace!("target edge PostOrderId={:?}", to);
- self.add_control_edge(from, to)
- });
- }
-}
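The deferred-edge bookkeeping removed above (`add_control_edge_hir_id` feeding `process_deferred_edges`) is easier to see in isolation. The following is a minimal standalone sketch of the same two-phase idea, not rustc code: `CfgSketch`, plain `usize` indices, and string node names are invented stand-ins for `DropRangesBuilder`, `PostOrderId`, and `HirId`, and only the standard library is used.

```rust
use std::collections::HashMap;

/// Sketch of the deferred-edge mechanism: edges whose target index is not yet
/// known are recorded against a node name and resolved in a later pass.
struct CfgSketch {
    successors: HashMap<usize, Vec<usize>>,       // post-order id -> successors
    deferred: Vec<(usize, &'static str)>,         // (from post-order id, target name)
    post_order_map: HashMap<&'static str, usize>, // node name -> post-order id
}

impl CfgSketch {
    fn add_control_edge(&mut self, from: usize, to: usize) {
        self.successors.entry(from).or_default().push(to);
    }

    /// Record an edge whose target is only known by name (e.g. a `break` target).
    fn add_control_edge_deferred(&mut self, from: usize, to: &'static str) {
        self.deferred.push((from, to));
    }

    /// Once every node has been assigned a post-order id, turn the deferred
    /// edges into ordinary control edges.
    fn process_deferred_edges(&mut self) {
        let edges = std::mem::take(&mut self.deferred);
        for (from, name) in edges {
            let to = self.post_order_map[name];
            self.add_control_edge(from, to);
        }
    }
}

fn main() {
    let mut cfg = CfgSketch {
        successors: HashMap::new(),
        deferred: Vec::new(),
        post_order_map: HashMap::new(),
    };
    // A `break` at post-order id 2 targeting the block named "loop_end".
    cfg.add_control_edge_deferred(2, "loop_end");
    // The traversal later assigns "loop_end" the post-order id 7.
    cfg.post_order_map.insert("loop_end", 7);
    cfg.process_deferred_edges();
    assert_eq!(cfg.successors[&2], vec![7]);
}
```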
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
deleted file mode 100644
index 633b47889..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_propagate.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use super::{DropRangesBuilder, PostOrderId};
-use rustc_index::{bit_set::BitSet, IndexVec};
-use std::collections::BTreeMap;
-
-impl DropRangesBuilder {
- pub fn propagate_to_fixpoint(&mut self) {
- trace!("before fixpoint: {:#?}", self);
- let preds = self.compute_predecessors();
-
- trace!("predecessors: {:#?}", preds.iter_enumerated().collect::<BTreeMap<_, _>>());
-
- let mut new_state = BitSet::new_empty(self.num_values());
- let mut changed_nodes = BitSet::new_empty(self.nodes.len());
- let mut unchanged_mask = BitSet::new_filled(self.nodes.len());
- changed_nodes.insert(0u32.into());
-
- let mut propagate = || {
- let mut changed = false;
- unchanged_mask.insert_all();
- for id in self.nodes.indices() {
- trace!("processing {:?}, changed_nodes: {:?}", id, changed_nodes);
- // Check if any predecessor has changed, and if not then short-circuit.
- //
- // We handle the start node specially, since it doesn't have any predecessors,
- // but we need to start somewhere.
- if match id.index() {
- 0 => !changed_nodes.contains(id),
- _ => !preds[id].iter().any(|pred| changed_nodes.contains(*pred)),
- } {
- trace!("short-circuiting because none of {:?} have changed", preds[id]);
- unchanged_mask.remove(id);
- continue;
- }
-
- if id.index() == 0 {
- new_state.clear();
- } else {
- // If we are not the start node and we have no predecessors, treat
- // everything as dropped because there's no way to get here anyway.
- new_state.insert_all();
- };
-
- for pred in &preds[id] {
- new_state.intersect(&self.nodes[*pred].drop_state);
- }
-
- for drop in &self.nodes[id].drops {
- new_state.insert(*drop);
- }
-
- for reinit in &self.nodes[id].reinits {
- new_state.remove(*reinit);
- }
-
- if self.nodes[id].drop_state.intersect(&new_state) {
- changed_nodes.insert(id);
- changed = true;
- } else {
- unchanged_mask.remove(id);
- }
- }
-
- changed_nodes.intersect(&unchanged_mask);
- changed
- };
-
- while propagate() {
- trace!("drop_state changed, re-running propagation");
- }
-
- trace!("after fixpoint: {:#?}", self);
- }
-
- fn compute_predecessors(&self) -> IndexVec<PostOrderId, Vec<PostOrderId>> {
- let mut preds = IndexVec::from_fn_n(|_| vec![], self.nodes.len());
- for (id, node) in self.nodes.iter_enumerated() {
- // If the node has no explicit successors, we assume that control
-            // will flow from this node into the next one.
- //
- // If there are successors listed, then we assume that all
- // possible successors are given and we do not include the default.
- if node.successors.len() == 0 && id.index() != self.nodes.len() - 1 {
- preds[id + 1].push(id);
- } else {
- for succ in &node.successors {
- preds[*succ].push(id);
- }
- }
- }
- preds
- }
-}
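The fixpoint loop deleted above is a standard forward "definitely dropped" dataflow: a node's state is the intersection of its predecessors' states, plus its own drops, minus its own reinits, iterated until nothing changes. Below is a minimal standalone sketch of that idea under simplifying assumptions: `HashSet<usize>` and plain `Vec` indices stand in for the compiler's bit sets and index types, and all names are invented.

```rust
use std::collections::HashSet;

#[derive(Clone)]
struct Node {
    preds: Vec<usize>,
    drops: Vec<usize>,
    reinits: Vec<usize>,
    drop_state: HashSet<usize>,
}

/// Iterate the "definitely dropped" dataflow to a fixpoint. A value is dropped
/// at a node only if it is dropped on every incoming path (intersection); then
/// the node's own drops are added and its reinits removed.
fn propagate_to_fixpoint(nodes: &mut [Node], num_values: usize) {
    let all: HashSet<usize> = (0..num_values).collect();
    loop {
        let mut changed = false;
        for id in 0..nodes.len() {
            let mut new_state = if nodes[id].preds.is_empty() {
                // The start node begins with nothing dropped; unreachable nodes
                // are treated as having everything dropped.
                if id == 0 { HashSet::new() } else { all.clone() }
            } else {
                let mut s = all.clone();
                for &p in &nodes[id].preds {
                    s.retain(|v| nodes[p].drop_state.contains(v));
                }
                s
            };
            for &d in &nodes[id].drops {
                new_state.insert(d);
            }
            for &r in &nodes[id].reinits {
                new_state.remove(&r);
            }
            if new_state != nodes[id].drop_state {
                nodes[id].drop_state = new_state;
                changed = true;
            }
        }
        if !changed {
            break;
        }
    }
}

fn main() {
    // Node 0 drops value 0; node 1 reinitializes it; node 2 joins both paths.
    let blank = Node { preds: vec![], drops: vec![], reinits: vec![], drop_state: HashSet::new() };
    let mut nodes = vec![blank.clone(), blank.clone(), blank];
    nodes[0].drops.push(0);
    nodes[1].preds.push(0);
    nodes[1].reinits.push(0);
    nodes[2].preds = vec![0, 1];
    propagate_to_fixpoint(&mut nodes, 1);
    assert!(nodes[0].drop_state.contains(&0));  // dropped after node 0
    assert!(!nodes[2].drop_state.contains(&0)); // not definitely dropped at the join
}
```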
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
deleted file mode 100644
index e8d31be79..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/cfg_visualize.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-//! Implementation of GraphWalk for DropRanges so we can visualize the control
-//! flow graph when needed for debugging.
-
-use rustc_graphviz as dot;
-use rustc_hir::{Expr, ExprKind, Node};
-use rustc_middle::ty::TyCtxt;
-
-use super::{DropRangesBuilder, PostOrderId};
-
-/// Writes the CFG for DropRangesBuilder to a .dot file for visualization.
-///
-/// It is not normally called, but is kept around to easily add debugging
-/// code when needed.
-pub(super) fn write_graph_to_file(
- drop_ranges: &DropRangesBuilder,
- filename: &str,
- tcx: TyCtxt<'_>,
-) {
- dot::render(
- &DropRangesGraph { drop_ranges, tcx },
- &mut std::fs::File::create(filename).unwrap(),
- )
- .unwrap();
-}
-
-struct DropRangesGraph<'a, 'tcx> {
- drop_ranges: &'a DropRangesBuilder,
- tcx: TyCtxt<'tcx>,
-}
-
-impl<'a> dot::GraphWalk<'a> for DropRangesGraph<'_, '_> {
- type Node = PostOrderId;
-
- type Edge = (PostOrderId, PostOrderId);
-
- fn nodes(&'a self) -> dot::Nodes<'a, Self::Node> {
- self.drop_ranges.nodes.iter_enumerated().map(|(i, _)| i).collect()
- }
-
- fn edges(&'a self) -> dot::Edges<'a, Self::Edge> {
- self.drop_ranges
- .nodes
- .iter_enumerated()
- .flat_map(|(i, node)| {
- if node.successors.len() == 0 {
- vec![(i, i + 1)]
- } else {
- node.successors.iter().map(move |&s| (i, s)).collect()
- }
- })
- .collect()
- }
-
- fn source(&'a self, edge: &Self::Edge) -> Self::Node {
- edge.0
- }
-
- fn target(&'a self, edge: &Self::Edge) -> Self::Node {
- edge.1
- }
-}
-
-impl<'a> dot::Labeller<'a> for DropRangesGraph<'_, '_> {
- type Node = PostOrderId;
-
- type Edge = (PostOrderId, PostOrderId);
-
- fn graph_id(&'a self) -> dot::Id<'a> {
- dot::Id::new("drop_ranges").unwrap()
- }
-
- fn node_id(&'a self, n: &Self::Node) -> dot::Id<'a> {
- dot::Id::new(format!("id{}", n.index())).unwrap()
- }
-
- fn node_label(&'a self, n: &Self::Node) -> dot::LabelText<'a> {
- dot::LabelText::LabelStr(
- format!(
- "{n:?}: {}",
- self.drop_ranges
- .post_order_map
- .iter()
- .find(|(_hir_id, &post_order_id)| post_order_id == *n)
- .map_or("<unknown>".into(), |(hir_id, _)| format!(
- "{}{}",
- self.tcx.hir().node_to_string(*hir_id),
- match self.tcx.hir().find(*hir_id) {
- Some(Node::Expr(Expr { kind: ExprKind::Yield(..), .. })) => " (yield)",
- _ => "",
- }
- ))
- )
- .into(),
- )
- }
-}
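The visualization helper above depends on `rustc_graphviz`; the same debugging aid can be approximated with nothing but std by emitting DOT text for a successor-list CFG, including the implicit fall-through edge for nodes with no recorded successors. This is an illustrative sketch with invented names, not the code that was removed.

```rust
use std::io::Write;

/// Emit a GraphViz .dot file for a successor-list CFG so it can be inspected
/// with, e.g., `dot -Tsvg cfg.dot -o cfg.svg`.
fn write_dot(successors: &[Vec<usize>], path: &str) -> std::io::Result<()> {
    let mut out = std::fs::File::create(path)?;
    writeln!(out, "digraph drop_ranges {{")?;
    for (i, succs) in successors.iter().enumerate() {
        // No explicit successors: fall through to the next post-order id.
        if succs.is_empty() && i + 1 < successors.len() {
            writeln!(out, "    id{} -> id{};", i, i + 1)?;
        }
        for &s in succs {
            writeln!(out, "    id{} -> id{};", i, s)?;
        }
    }
    writeln!(out, "}}")?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    // id0 -> id1 (fall-through), id1 -> id0 (an explicit back edge, e.g. a loop).
    write_dot(&[vec![], vec![0]], "cfg.dot")
}
```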
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
deleted file mode 100644
index e563bd40b..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/mod.rs
+++ /dev/null
@@ -1,306 +0,0 @@
-//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
-//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
-//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
-//!
-//! There are three phases to this analysis:
-//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
-//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
-//! and also build a control flow graph.
-//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
-//! the CFG and find the exact points where we know a value is definitely dropped.
-//!
-//! The end result is a data structure that maps the post-order index of each node in the HIR tree
-//! to a set of values that are known to be dropped at that location.
-
-use self::cfg_build::build_control_flow_graph;
-use self::record_consumed_borrow::find_consumed_and_borrowed;
-use crate::FnCtxt;
-use hir::def_id::DefId;
-use hir::{Body, HirId, HirIdMap, Node};
-use rustc_data_structures::unord::{UnordMap, UnordSet};
-use rustc_hir as hir;
-use rustc_index::bit_set::BitSet;
-use rustc_index::IndexVec;
-use rustc_middle::hir::map::Map;
-use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
-use rustc_middle::ty;
-use std::collections::BTreeMap;
-use std::fmt::Debug;
-
-mod cfg_build;
-mod cfg_propagate;
-mod cfg_visualize;
-mod record_consumed_borrow;
-
-pub fn compute_drop_ranges<'a, 'tcx>(
- fcx: &'a FnCtxt<'a, 'tcx>,
- def_id: DefId,
- body: &'tcx Body<'tcx>,
-) -> DropRanges {
- if fcx.sess().opts.unstable_opts.drop_tracking {
- let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
-
- let typeck_results = &fcx.typeck_results.borrow();
- let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
- let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
- &fcx,
- typeck_results,
- fcx.param_env,
- consumed_borrowed_places,
- body,
- num_exprs,
- );
-
- drop_ranges.propagate_to_fixpoint();
-
- debug!("borrowed_temporaries = {borrowed_temporaries:?}");
- DropRanges {
- tracked_value_map: drop_ranges.tracked_value_map,
- nodes: drop_ranges.nodes,
- borrowed_temporaries: Some(borrowed_temporaries),
- }
- } else {
- // If drop range tracking is not enabled, skip all the analysis and produce an
- // empty set of DropRanges.
- DropRanges {
- tracked_value_map: UnordMap::default(),
- nodes: IndexVec::new(),
- borrowed_temporaries: None,
- }
- }
-}
-
-/// Applies `f` to each consumable node in the HIR subtree pointed to by `place`.
-///
-/// This includes the place itself, and if the place is a reference to a local
-/// variable then `f` is also called on the HIR node for that variable as well.
-///
-/// For example, if `place` points to `foo()`, then `f` is called once for the
-/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
-/// be called both on the `ExprKind::Path` node that represents the expression
-/// and on the HirId of the local `x` itself.
-fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
- f(place);
- let node = hir.find(place.hir_id());
- if let Some(Node::Expr(expr)) = node {
- match expr.kind {
- hir::ExprKind::Path(hir::QPath::Resolved(
- _,
- hir::Path { res: hir::def::Res::Local(hir_id), .. },
- )) => {
- f(TrackedValue::Variable(*hir_id));
- }
- _ => (),
- }
- }
-}
-
-rustc_index::newtype_index! {
- #[debug_format = "id({})"]
- pub struct PostOrderId {}
-}
-
-rustc_index::newtype_index! {
- #[debug_format = "hidx({})"]
- pub struct TrackedValueIndex {}
-}
-
-/// Identifies a value whose drop state we need to track.
-#[derive(PartialEq, Eq, Hash, Clone, Copy)]
-enum TrackedValue {
- /// Represents a named variable, such as a let binding, parameter, or upvar.
- ///
- /// The HirId points to the variable's definition site.
- Variable(HirId),
- /// A value produced as a result of an expression.
- ///
- /// The HirId points to the expression that returns this value.
- Temporary(HirId),
-}
-
-impl Debug for TrackedValue {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ty::tls::with_opt(|opt_tcx| {
- if let Some(tcx) = opt_tcx {
- write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
- } else {
- match self {
- Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
- Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
- }
- }
- })
- }
-}
-
-impl TrackedValue {
- fn hir_id(&self) -> HirId {
- match self {
- TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
- }
- }
-
- fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
- match place_with_id.place.base {
- PlaceBase::Rvalue | PlaceBase::StaticItem => {
- TrackedValue::Temporary(place_with_id.hir_id)
- }
- PlaceBase::Local(hir_id)
- | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
- TrackedValue::Variable(hir_id)
- }
- }
- }
-}
-
-/// Represents a reason why we might not be able to convert a HirId or Place
-/// into a tracked value.
-#[derive(Debug)]
-enum TrackedValueConversionError {
-    /// Place projections are not currently supported.
- ///
- /// The reasoning around these is kind of subtle, so we choose to be more
- /// conservative around these for now. There is no reason in theory we
- /// cannot support these, we just have not implemented it yet.
- PlaceProjectionsNotSupported,
-}
-
-impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
- type Error = TrackedValueConversionError;
-
- fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
- if !place_with_id.place.projections.is_empty() {
- debug!(
- "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
- place_with_id
- );
- return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
- }
-
- Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
- }
-}
-
-pub struct DropRanges {
- tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
- nodes: IndexVec<PostOrderId, NodeInfo>,
- borrowed_temporaries: Option<UnordSet<HirId>>,
-}
-
-impl DropRanges {
- pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
- self.tracked_value_map
- .get(&TrackedValue::Temporary(hir_id))
- .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
- .cloned()
- .is_some_and(|tracked_value_id| {
- self.expect_node(location.into()).drop_state.contains(tracked_value_id)
- })
- }
-
- pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
- if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
- }
-
- /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
- fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
- &self.nodes[id]
- }
-}
-
-/// Tracks information needed to compute drop ranges.
-struct DropRangesBuilder {
- /// The core of DropRangesBuilder is a set of nodes, which each represent
- /// one expression. We primarily refer to them by their index in a
- /// post-order traversal of the HIR tree, since this is what
- /// generator_interior uses to talk about yield positions.
- ///
- /// This IndexVec keeps the relevant details for each node. See the
- /// NodeInfo struct for more details, but this information includes things
- /// such as the set of control-flow successors, which variables are dropped
- /// or reinitialized, and whether each variable has been inferred to be
- /// known-dropped or potentially reinitialized at each point.
- nodes: IndexVec<PostOrderId, NodeInfo>,
- /// We refer to values whose drop state we are tracking by the HirId of
- /// where they are defined. Within a NodeInfo, however, we store the
-    /// drop-state in a bit vector indexed by a TrackedValueIndex
-    /// (see NodeInfo::drop_state). The tracked_value_map field stores the mapping
-    /// from tracked values to the TrackedValueIndex that is used to represent that
-    /// value in the bit vector.
- tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
-
- /// When building the control flow graph, we don't always know the
- /// post-order index of the target node at the point we encounter it.
- /// For example, this happens with break and continue. In those cases,
- /// we store a pair of the PostOrderId of the source and the HirId
- /// of the target. Once we have gathered all of these edges, we make a
- /// pass over the set of deferred edges (see process_deferred_edges in
- /// cfg_build.rs), look up the PostOrderId for the target (since now the
- /// post-order index for all nodes is known), and add missing control flow
- /// edges.
- deferred_edges: Vec<(PostOrderId, HirId)>,
- /// This maps HirIds of expressions to their post-order index. It is
- /// used in process_deferred_edges to correctly add back-edges.
- post_order_map: HirIdMap<PostOrderId>,
-}
-
-impl Debug for DropRangesBuilder {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_struct("DropRanges")
- .field("hir_id_map", &self.tracked_value_map)
- .field("post_order_maps", &self.post_order_map)
- .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
- .finish()
- }
-}
-
-/// DropRanges keeps track of what values are definitely dropped at each point in the code.
-///
-/// Values of interest are defined by the hir_id of their place. Locations in code are identified
-/// by their index in the post-order traversal. At its core, DropRanges maps
-/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
-/// dropped at the point of the node identified by post_order_id.
-impl DropRangesBuilder {
- /// Returns the number of values (hir_ids) that are tracked
- fn num_values(&self) -> usize {
- self.tracked_value_map.len()
- }
-
- fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
- let size = self.num_values();
- self.nodes.ensure_contains_elem(id, || NodeInfo::new(size))
- }
-
- fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
- trace!("adding control edge from {:?} to {:?}", from, to);
- self.node_mut(from).successors.push(to);
- }
-}
-
-#[derive(Debug)]
-struct NodeInfo {
- /// IDs of nodes that can follow this one in the control flow
- ///
- /// If the vec is empty, then control proceeds to the next node.
- successors: Vec<PostOrderId>,
-
- /// List of hir_ids that are dropped by this node.
- drops: Vec<TrackedValueIndex>,
-
- /// List of hir_ids that are reinitialized by this node.
- reinits: Vec<TrackedValueIndex>,
-
- /// Set of values that are definitely dropped at this point.
- drop_state: BitSet<TrackedValueIndex>,
-}
-
-impl NodeInfo {
- fn new(num_values: usize) -> Self {
- Self {
- successors: vec![],
- drops: vec![],
- reinits: vec![],
- drop_state: BitSet::new_filled(num_values),
- }
- }
-}
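The public surface of the module removed above boils down to interning tracked values to dense indices and answering `is_dropped_at(value, post_order_location)`. A toy sketch of that lookup shape follows, with invented names and std collections standing in for `UnordMap`/`BitSet`; it illustrates the data layout, not the rustc API.

```rust
use std::collections::{HashMap, HashSet};

/// Simplified shape of the analysis result: values are interned to an index,
/// and every post-order location carries the set of indices definitely
/// dropped there.
struct DropRangesSketch {
    tracked_value_map: HashMap<&'static str, usize>, // value -> dense index
    drop_state: Vec<HashSet<usize>>,                 // post-order id -> dropped indices
}

impl DropRangesSketch {
    fn is_dropped_at(&self, value: &str, location: usize) -> bool {
        self.tracked_value_map
            .get(value)
            .map_or(false, |&idx| self.drop_state[location].contains(&idx))
    }
}

fn main() {
    let ranges = DropRangesSketch {
        tracked_value_map: HashMap::from([("x", 0)]),
        drop_state: vec![HashSet::new(), HashSet::from([0]), HashSet::from([0])],
    };
    assert!(!ranges.is_dropped_at("x", 0)); // still live before the drop
    assert!(ranges.is_dropped_at("x", 2));  // definitely dropped afterwards
}
```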
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
deleted file mode 100644
index 29413f080..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/drop_ranges/record_consumed_borrow.rs
+++ /dev/null
@@ -1,242 +0,0 @@
-use super::TrackedValue;
-use crate::{
- expr_use_visitor::{self, ExprUseVisitor},
- FnCtxt,
-};
-use hir::{def_id::DefId, Body, HirId, HirIdMap};
-use rustc_data_structures::{fx::FxIndexSet, unord::UnordSet};
-use rustc_hir as hir;
-use rustc_middle::ty::{ParamEnv, TyCtxt};
-use rustc_middle::{
- hir::place::{PlaceBase, Projection, ProjectionKind},
- ty::TypeVisitableExt,
-};
-
-pub(super) fn find_consumed_and_borrowed<'a, 'tcx>(
- fcx: &'a FnCtxt<'a, 'tcx>,
- def_id: DefId,
- body: &'tcx Body<'tcx>,
-) -> ConsumedAndBorrowedPlaces {
- let mut expr_use_visitor = ExprUseDelegate::new(fcx.tcx, fcx.param_env);
- expr_use_visitor.consume_body(fcx, def_id, body);
- expr_use_visitor.places
-}
-
-pub(super) struct ConsumedAndBorrowedPlaces {
- /// Records the variables/expressions that are dropped by a given expression.
- ///
-    /// The key is the hir-id of the expression, and the value is a set of hir-ids for variables
- /// or values that are consumed by that expression.
- ///
- /// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is
- /// not considered a drop of `x`, although it would be a drop of `x.y`.
- pub(super) consumed: HirIdMap<FxIndexSet<TrackedValue>>,
-
- /// A set of hir-ids of values or variables that are borrowed at some point within the body.
- pub(super) borrowed: UnordSet<TrackedValue>,
-
-    /// A set of hir-ids of expressions whose temporaries are borrowed at some point within the body.
- pub(super) borrowed_temporaries: UnordSet<HirId>,
-}
-
-/// Works with ExprUseVisitor to find interesting values for the drop range analysis.
-///
-/// Interesting values are those that are either dropped or borrowed. For dropped values, we also
-/// record the parent expression, which is the point where the drop actually takes place.
-struct ExprUseDelegate<'tcx> {
- tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- places: ConsumedAndBorrowedPlaces,
-}
-
-impl<'tcx> ExprUseDelegate<'tcx> {
- fn new(tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
- Self {
- tcx,
- param_env,
- places: ConsumedAndBorrowedPlaces {
- consumed: <_>::default(),
- borrowed: <_>::default(),
- borrowed_temporaries: <_>::default(),
- },
- }
- }
-
- fn consume_body(&mut self, fcx: &'_ FnCtxt<'_, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>) {
- // Run ExprUseVisitor to find where values are consumed.
- ExprUseVisitor::new(
- self,
- &fcx.infcx,
- def_id.expect_local(),
- fcx.param_env,
- &fcx.typeck_results.borrow(),
- )
- .consume_body(body);
- }
-
- fn mark_consumed(&mut self, consumer: HirId, target: TrackedValue) {
- self.places.consumed.entry(consumer).or_insert_with(|| <_>::default());
-
- debug!(?consumer, ?target, "mark_consumed");
- self.places.consumed.get_mut(&consumer).map(|places| places.insert(target));
- }
-
- fn borrow_place(&mut self, place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>) {
- self.places
- .borrowed
- .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
-
-        // Ordinarily a value is consumed by its parent, but in the special case of a
- // borrowed RValue, we create a reference that lives as long as the temporary scope
- // for that expression (typically, the innermost statement, but sometimes the enclosing
- // block). We record this fact here so that later in generator_interior
- // we can use the correct scope.
- //
- // We special case borrows through a dereference (`&*x`, `&mut *x` where `x` is
- // some rvalue expression), since these are essentially a copy of a pointer.
- // In other words, this borrow does not refer to the
- // temporary (`*x`), but to the referent (whatever `x` is a borrow of).
- //
- // We were considering that we might encounter problems down the line if somehow,
- // some part of the compiler were to look at this result and try to use it to
- // drive a borrowck-like analysis (this does not currently happen, as of this writing).
- // But even this should be fine, because the lifetime of the dereferenced reference
- // found in the rvalue is only significant as an intermediate 'link' to the value we
- // are producing, and we separately track whether that value is live over a yield.
- // Example:
- //
- // ```notrust
- // fn identity<T>(x: &mut T) -> &mut T { x }
- // let a: A = ...;
- // let y: &'y mut A = &mut *identity(&'a mut a);
- // ^^^^^^^^^^^^^^^^^^^^^^^^^ the borrow we are talking about
- // ```
- //
- // The expression `*identity(...)` is a deref of an rvalue,
- // where the `identity(...)` (the rvalue) produces a return type
- // of `&'rv mut A`, where `'a: 'rv`. We then assign this result to
- // `'y`, resulting in (transitively) `'a: 'y` (i.e., while `y` is in use,
- // `a` will be considered borrowed). Other parts of the code will ensure
- // that if `y` is live over a yield, `&'y mut A` appears in the generator
- // state. If `'y` is live, then any sound region analysis must conclude
- // that `'a` is also live. So if this causes a bug, blame some other
- // part of the code!
- let is_deref = place_with_id
- .place
- .projections
- .iter()
- .any(|Projection { kind, .. }| *kind == ProjectionKind::Deref);
-
- if let (false, PlaceBase::Rvalue) = (is_deref, place_with_id.place.base) {
- self.places.borrowed_temporaries.insert(place_with_id.hir_id);
- }
- }
-}
-
-impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
- fn consume(
- &mut self,
- place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
- diag_expr_id: HirId,
- ) {
- let hir = self.tcx.hir();
- let parent = match hir.opt_parent_id(place_with_id.hir_id) {
- Some(parent) => parent,
- None => place_with_id.hir_id,
- };
- debug!(
- "consume {:?}; diag_expr_id={}, using parent {}",
- place_with_id,
- hir.node_to_string(diag_expr_id),
- hir.node_to_string(parent)
- );
-
- if let Ok(tracked_value) = place_with_id.try_into() {
- self.mark_consumed(parent, tracked_value)
- }
- }
-
- fn borrow(
- &mut self,
- place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
- diag_expr_id: HirId,
- bk: rustc_middle::ty::BorrowKind,
- ) {
- debug!(
- "borrow: place_with_id = {place_with_id:#?}, diag_expr_id={diag_expr_id:#?}, \
- borrow_kind={bk:#?}"
- );
-
- self.borrow_place(place_with_id);
- }
-
- fn copy(
- &mut self,
- place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
- _diag_expr_id: HirId,
- ) {
- debug!("copy: place_with_id = {place_with_id:?}");
-
- self.places
- .borrowed
- .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
-
-        // Copies are treated mostly like a borrow, except that we don't add the place
- // to borrowed_temporaries because the copy is consumed.
- }
-
- fn mutate(
- &mut self,
- assignee_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
- diag_expr_id: HirId,
- ) {
- debug!("mutate {assignee_place:?}; diag_expr_id={diag_expr_id:?}");
-
- if assignee_place.place.base == PlaceBase::Rvalue
- && assignee_place.place.projections.is_empty()
- {
- // Assigning to an Rvalue is illegal unless done through a dereference. We would have
- // already gotten a type error, so we will just return here.
- return;
- }
-
-        // If the type being assigned needs to be dropped, then the mutation counts as a borrow
- // since it is essentially doing `Drop::drop(&mut x); x = new_value;`.
- let ty = self.tcx.erase_regions(assignee_place.place.base_ty);
- if ty.has_infer() {
- self.tcx.sess.delay_span_bug(
- self.tcx.hir().span(assignee_place.hir_id),
- format!("inference variables in {ty}"),
- );
- } else if ty.needs_drop(self.tcx, self.param_env) {
- self.places
- .borrowed
- .insert(TrackedValue::from_place_with_projections_allowed(assignee_place));
- }
- }
-
- fn bind(
- &mut self,
- binding_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
- diag_expr_id: HirId,
- ) {
- debug!("bind {binding_place:?}; diag_expr_id={diag_expr_id:?}");
- }
-
- fn fake_read(
- &mut self,
- place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
- cause: rustc_middle::mir::FakeReadCause,
- diag_expr_id: HirId,
- ) {
- debug!(
- "fake_read place_with_id={place_with_id:?}; cause={cause:?}; diag_expr_id={diag_expr_id:?}"
- );
-
- // fake reads happen in places like the scrutinee of a match expression.
- // we treat those as a borrow, much like a copy: the idea is that we are
- // transiently creating a `&T` ref that we can read from to observe the current
- // value (this `&T` is immediately dropped afterwards).
- self.borrow_place(place_with_id);
- }
-}
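The borrowed-temporary rule deleted above (a borrow of an rvalue counts as a borrowed temporary unless it goes through a dereference) can be shown with a few toy types. `PlaceBase` and `ProjectionKind` below are simplified stand-ins for illustration, not the `rustc_middle` definitions.

```rust
/// A borrow of an rvalue counts as a borrowed temporary only when no
/// projection dereferences it, because `&*x` borrows whatever `x` points to,
/// not the temporary itself.
#[derive(PartialEq)]
enum ProjectionKind {
    Deref,
    Field,
}

enum PlaceBase {
    Rvalue,
    Local,
}

fn is_borrowed_temporary(base: &PlaceBase, projections: &[ProjectionKind]) -> bool {
    let is_deref = projections.iter().any(|p| *p == ProjectionKind::Deref);
    matches!(base, PlaceBase::Rvalue) && !is_deref
}

fn main() {
    // `&foo()` borrows the temporary produced by the call...
    assert!(is_borrowed_temporary(&PlaceBase::Rvalue, &[]));
    // ...but `&*foo()` borrows the referent behind it, and `&x.f` is not a temporary at all.
    assert!(!is_borrowed_temporary(&PlaceBase::Rvalue, &[ProjectionKind::Deref]));
    assert!(!is_borrowed_temporary(&PlaceBase::Local, &[ProjectionKind::Field]));
}
```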
diff --git a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs b/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
deleted file mode 100644
index 6a8171224..000000000
--- a/compiler/rustc_hir_typeck/src/generator_interior/mod.rs
+++ /dev/null
@@ -1,714 +0,0 @@
-//! This calculates the types whose storage lives across a suspension point in a
-//! generator, from the perspective of typeck. The actual types used at runtime
-//! are calculated in `rustc_mir_transform::generator` and may be a subset of the
-//! types computed here.
-
-use self::drop_ranges::DropRanges;
-use super::FnCtxt;
-use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
-use rustc_errors::{pluralize, DelayDm};
-use rustc_hir as hir;
-use rustc_hir::def::{CtorKind, DefKind, Res};
-use rustc_hir::def_id::DefId;
-use rustc_hir::hir_id::HirIdSet;
-use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
-use rustc_infer::infer::{DefineOpaqueTypes, RegionVariableOrigin};
-use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
-use rustc_middle::ty::fold::FnMutDelegate;
-use rustc_middle::ty::{self, BoundVariableKind, RvalueScopes, Ty, TyCtxt, TypeVisitableExt};
-use rustc_span::symbol::sym;
-use rustc_span::Span;
-use smallvec::{smallvec, SmallVec};
-
-mod drop_ranges;
-
-struct InteriorVisitor<'a, 'tcx> {
- fcx: &'a FnCtxt<'a, 'tcx>,
- region_scope_tree: &'a region::ScopeTree,
- types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>,
- rvalue_scopes: &'a RvalueScopes,
- expr_count: usize,
- kind: hir::GeneratorKind,
- prev_unresolved_span: Option<Span>,
- linted_values: HirIdSet,
- drop_ranges: DropRanges,
-}
-
-impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
- fn record(
- &mut self,
- ty: Ty<'tcx>,
- hir_id: HirId,
- scope: Option<region::Scope>,
- expr: Option<&'tcx Expr<'tcx>>,
- source_span: Span,
- ) {
- use rustc_span::DUMMY_SP;
-
- let ty = self.fcx.resolve_vars_if_possible(ty);
-
- debug!(
- "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}",
- ty, hir_id, scope, expr, source_span, self.expr_count,
- );
-
- let live_across_yield = scope
- .map(|s| {
- self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| {
- // If we are recording an expression that is the last yield
- // in the scope, or that has a postorder CFG index larger
-                    // than that of every yield, then its value can't
- // be storage-live (and therefore live) at any of the yields.
- //
- // See the mega-comment at `yield_in_scope` for a proof.
-
- yield_data
- .iter()
- .find(|yield_data| {
- debug!(
- "comparing counts yield: {} self: {}, source_span = {:?}",
- yield_data.expr_and_pat_count, self.expr_count, source_span
- );
-
- if self
- .is_dropped_at_yield_location(hir_id, yield_data.expr_and_pat_count)
- {
- debug!("value is dropped at yield point; not recording");
- return false;
- }
-
-                            // If it is a borrow that happens in the guard, it
-                            // needs to be recorded regardless, because such
-                            // borrows do live across this yield point.
- yield_data.expr_and_pat_count >= self.expr_count
- })
- .cloned()
- })
- })
- .unwrap_or_else(|| {
- Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() })
- });
-
- if let Some(yield_data) = live_across_yield {
- debug!(
- "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}",
- expr, scope, ty, self.expr_count, yield_data.span
- );
-
- if let Some((unresolved_term, unresolved_type_span)) =
- self.fcx.first_unresolved_const_or_ty_var(&ty)
- {
-                // If the unresolved type isn't a ty_var, then unresolved_type_span is None
- let span = self
- .prev_unresolved_span
- .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span));
-
- // If we encounter an int/float variable, then inference fallback didn't
- // finish due to some other error. Don't emit spurious additional errors.
- if let Some(unresolved_ty) = unresolved_term.ty()
- && let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) = unresolved_ty.kind()
- {
- self.fcx
- .tcx
- .sess
- .delay_span_bug(span, format!("Encountered var {unresolved_term:?}"));
- } else {
- let note = format!(
- "the type is part of the {} because of this {}",
- self.kind.descr(),
- yield_data.source
- );
-
- self.fcx
- .need_type_info_err_in_generator(self.kind, span, unresolved_term)
- .span_note(yield_data.span, note)
- .emit();
- }
- } else {
- // Insert the type into the ordered set.
- let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree));
-
- if !self.linted_values.contains(&hir_id) {
- check_must_not_suspend_ty(
- self.fcx,
- ty,
- hir_id,
- SuspendCheckData {
- expr,
- source_span,
- yield_span: yield_data.span,
- plural_len: 1,
- ..Default::default()
- },
- );
- self.linted_values.insert(hir_id);
- }
-
- self.types.insert(ty::GeneratorInteriorTypeCause {
- span: source_span,
- ty,
- scope_span,
- yield_span: yield_data.span,
- expr: expr.map(|e| e.hir_id),
- });
- }
- } else {
- debug!(
- "no type in expr = {:?}, count = {:?}, span = {:?}",
- expr,
- self.expr_count,
- expr.map(|e| e.span)
- );
- if let Some((unresolved_type, unresolved_type_span)) =
- self.fcx.first_unresolved_const_or_ty_var(&ty)
- {
- debug!(
- "remained unresolved_type = {:?}, unresolved_type_span: {:?}",
- unresolved_type, unresolved_type_span
- );
- self.prev_unresolved_span = unresolved_type_span;
- }
- }
- }
-
- /// If drop tracking is enabled, consult drop_ranges to see if a value is
- /// known to be dropped at a yield point and therefore can be omitted from
- /// the generator witness.
- fn is_dropped_at_yield_location(&self, value_hir_id: HirId, yield_location: usize) -> bool {
- // short-circuit if drop tracking is not enabled.
- if !self.fcx.sess().opts.unstable_opts.drop_tracking {
- return false;
- }
-
- self.drop_ranges.is_dropped_at(value_hir_id, yield_location)
- }
-}
-
-pub fn resolve_interior<'a, 'tcx>(
- fcx: &'a FnCtxt<'a, 'tcx>,
- def_id: DefId,
- body_id: hir::BodyId,
- interior: Ty<'tcx>,
- kind: hir::GeneratorKind,
-) {
- let body = fcx.tcx.hir().body(body_id);
- let typeck_results = fcx.inh.typeck_results.borrow();
- let mut visitor = InteriorVisitor {
- fcx,
- types: FxIndexSet::default(),
- region_scope_tree: fcx.tcx.region_scope_tree(def_id),
- rvalue_scopes: &typeck_results.rvalue_scopes,
- expr_count: 0,
- kind,
- prev_unresolved_span: None,
- linted_values: <_>::default(),
- drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body),
- };
- intravisit::walk_body(&mut visitor, body);
-
- // Check that we visited the same amount of expressions as the RegionResolutionVisitor
- let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap();
- assert_eq!(region_expr_count, visitor.expr_count);
-
- // The types are already kept in insertion order.
- let types = visitor.types;
-
- // The types in the generator interior contain lifetimes local to the generator itself,
- // which should not be exposed outside of the generator. Therefore, we replace these
- // lifetimes with existentially-bound lifetimes, which reflect the exact value of the
- // lifetimes not being known by users.
- //
- // These lifetimes are used in auto trait impl checking (for example,
- // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync),
- // so knowledge of the exact relationships between them isn't particularly important.
-
- debug!("types in generator {:?}, span = {:?}", types, body.value.span);
-
- // We want to deduplicate if the lifetimes are the same modulo some non-informative counter.
- // So, we need to actually do two passes: first by type to anonymize (preserving information
- // required for diagnostics), then a second pass over all captured types to reassign disjoint
- // region indices.
- let mut captured_tys = FxHashSet::default();
- let type_causes: Vec<_> = types
- .into_iter()
- .filter_map(|mut cause| {
- // Replace all regions inside the generator interior with late bound regions.
- // Note that each region slot in the types gets a new fresh late bound region,
- // which means that none of the regions inside relate to any other, even if
- // typeck had previously found constraints that would cause them to be related.
-
- let mut counter = 0;
- let mut mk_bound_region = |kind| {
- let var = ty::BoundVar::from_u32(counter);
- counter += 1;
- ty::BoundRegion { var, kind }
- };
- let ty = fcx.normalize(cause.span, cause.ty);
- let ty = fcx.tcx.fold_regions(ty, |region, current_depth| {
- let br = match region.kind() {
- ty::ReVar(vid) => {
- let origin = fcx.region_var_origin(vid);
- match origin {
- RegionVariableOrigin::EarlyBoundRegion(span, _) => {
- mk_bound_region(ty::BrAnon(Some(span)))
- }
- _ => mk_bound_region(ty::BrAnon(None)),
- }
- }
- ty::ReEarlyBound(region) => {
- mk_bound_region(ty::BrNamed(region.def_id, region.name))
- }
- ty::ReLateBound(_, ty::BoundRegion { kind, .. })
- | ty::ReFree(ty::FreeRegion { bound_region: kind, .. }) => match kind {
- ty::BoundRegionKind::BrAnon(span) => mk_bound_region(ty::BrAnon(span)),
- ty::BoundRegionKind::BrNamed(def_id, sym) => {
- mk_bound_region(ty::BrNamed(def_id, sym))
- }
- ty::BoundRegionKind::BrEnv => mk_bound_region(ty::BrAnon(None)),
- },
- _ => mk_bound_region(ty::BrAnon(None)),
- };
- let r = ty::Region::new_late_bound(fcx.tcx, current_depth, br);
- r
- });
- captured_tys.insert(ty).then(|| {
- cause.ty = ty;
- cause
- })
- })
- .collect();
-
- let mut bound_vars: SmallVec<[BoundVariableKind; 4]> = smallvec![];
- let mut counter = 0;
- // Optimization: If there is only one captured type, then we don't actually
- // need to fold and reindex (since the first type doesn't change).
- let type_causes = if captured_tys.len() > 0 {
- // Optimization: Use `replace_escaping_bound_vars_uncached` instead of
- // `fold_regions`, since we only have late bound regions, and it skips
- // types without bound regions.
- fcx.tcx.replace_escaping_bound_vars_uncached(
- type_causes,
- FnMutDelegate {
- regions: &mut |br| {
- let kind = br.kind;
- let var = ty::BoundVar::from_usize(bound_vars.len());
- bound_vars.push(ty::BoundVariableKind::Region(kind));
- counter += 1;
- ty::Region::new_late_bound(
- fcx.tcx,
- ty::INNERMOST,
- ty::BoundRegion { var, kind },
- )
- },
- types: &mut |b| bug!("unexpected bound ty in binder: {b:?}"),
- consts: &mut |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"),
- },
- )
- } else {
- type_causes
- };
-
- // Extract type components to build the witness type.
- let type_list = fcx.tcx.mk_type_list_from_iter(type_causes.iter().map(|cause| cause.ty));
- let bound_vars = fcx.tcx.mk_bound_variable_kinds(&bound_vars);
- let witness =
- Ty::new_generator_witness(fcx.tcx, ty::Binder::bind_with_vars(type_list, bound_vars));
-
- drop(typeck_results);
- // Store the generator types and spans into the typeck results for this generator.
- fcx.inh.typeck_results.borrow_mut().generator_interior_types =
- ty::Binder::bind_with_vars(type_causes, bound_vars);
-
- debug!(
- "types in generator after region replacement {:?}, span = {:?}",
- witness, body.value.span
- );
-
- // Unify the type variable inside the generator with the new witness
- match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(
- DefineOpaqueTypes::No,
- interior,
- witness,
- ) {
- Ok(ok) => fcx.register_infer_ok_obligations(ok),
- _ => bug!("failed to relate {interior} and {witness}"),
- }
-}
-
-// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in
-// librustc_middle/middle/region.rs since `expr_count` is compared against the results
-// there.
-impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> {
- fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
- let Arm { guard, pat, body, .. } = arm;
- self.visit_pat(pat);
- if let Some(ref g) = guard {
- {
- // If there is a guard, we need to count all variables bound in the pattern as
- // borrowed for the entire guard body, regardless of whether they are accessed.
- // We do this by walking the pattern bindings and recording `&T` for any `x: T`
- // that is bound.
-
- struct ArmPatCollector<'a, 'b, 'tcx> {
- interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>,
- scope: Scope,
- }
-
- impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> {
- fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
- intravisit::walk_pat(self, pat);
- if let PatKind::Binding(_, id, ident, ..) = pat.kind {
- let ty =
- self.interior_visitor.fcx.typeck_results.borrow().node_type(id);
- let tcx = self.interior_visitor.fcx.tcx;
- let ty = Ty::new_ref(
- tcx,
- // Use `ReErased` as `resolve_interior` is going to replace all the
- // regions anyway.
- tcx.lifetimes.re_erased,
- ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
- );
- self.interior_visitor.record(
- ty,
- id,
- Some(self.scope),
- None,
- ident.span,
- );
- }
- }
- }
-
- ArmPatCollector {
- interior_visitor: self,
- scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node },
- }
- .visit_pat(pat);
- }
-
- match g {
- Guard::If(ref e) => {
- self.visit_expr(e);
- }
- Guard::IfLet(ref l) => {
- self.visit_let_expr(l);
- }
- }
- }
- self.visit_expr(body);
- }
-
- fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
- intravisit::walk_pat(self, pat);
-
- self.expr_count += 1;
-
- if let PatKind::Binding(..) = pat.kind {
- let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap();
- let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
- self.record(ty, pat.hir_id, Some(scope), None, pat.span);
- }
- }
-
- fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
- match &expr.kind {
- ExprKind::Call(callee, args) => match &callee.kind {
- ExprKind::Path(qpath) => {
- let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id);
- match res {
- // Direct calls never need to keep the callee `ty::FnDef`
- // ZST in a temporary, so skip its type, just in case it
- // can significantly complicate the generator type.
- Res::Def(
- DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn),
- _,
- ) => {
- // NOTE(eddyb) this assumes a path expression has
- // no nested expressions to keep track of.
- self.expr_count += 1;
-
- // Record the rest of the call expression normally.
- for arg in *args {
- self.visit_expr(arg);
- }
- }
- _ => intravisit::walk_expr(self, expr),
- }
- }
- _ => intravisit::walk_expr(self, expr),
- },
- _ => intravisit::walk_expr(self, expr),
- }
-
- self.expr_count += 1;
-
- debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
-
- let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr);
-
- // Typically, the value produced by an expression is consumed by its parent in some way,
- // so we only have to check if the parent contains a yield (note that the parent may, for
- // example, store the value into a local variable, but then we already consider local
- // variables to be live across their scope).
- //
- // However, in the case of temporary values, we are going to store the value into a
- // temporary on the stack that is live for the current temporary scope and then return a
- // reference to it. That value may be live across the entire temporary scope.
- //
- // There's another subtlety: if the type has an observable drop, it must be dropped after
- // the yield, even if it's not borrowed or referenced after the yield. Ideally this would
- // *only* happen for types with observable drop, not all types which wrap them, but that
- // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in
- // tests/ui/generator/drop-tracking-parent-expression.rs.
- let scope = if self.drop_ranges.is_borrowed_temporary(expr)
- || ty.map_or(true, |ty| {
- // Avoid ICEs in needs_drop.
- let ty = self.fcx.resolve_vars_if_possible(ty);
- let ty = self.fcx.tcx.erase_regions(ty);
- if ty.has_infer() {
- self.fcx
- .tcx
- .sess
- .delay_span_bug(expr.span, format!("inference variables in {ty}"));
- true
- } else {
- ty.needs_drop(self.fcx.tcx, self.fcx.param_env)
- }
- }) {
- self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
- } else {
- let parent_expr = self
- .fcx
- .tcx
- .hir()
- .parent_iter(expr.hir_id)
- .find(|(_, node)| matches!(node, hir::Node::Expr(_)))
- .map(|(id, _)| id);
- debug!("parent_expr: {:?}", parent_expr);
- match parent_expr {
- Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
- None => {
- self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
- }
- }
- };
-
- // If there are adjustments, then record the final type --
- // this is the actual value that is being produced.
- if let Some(adjusted_ty) = ty {
- self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span);
- }
-
- // Also record the unadjusted type (which is the only type if
- // there are no adjustments). The reason for this is that the
- // unadjusted value is sometimes a "temporary" that would wind
- // up in a MIR temporary.
- //
- // As an example, consider an expression like `vec![].push(x)`.
- // Here, the `vec![]` would wind up MIR stored into a
- // temporary variable `t` which we can borrow to invoke
- // `<Vec<_>>::push(&mut t, x)`.
- //
- // Note that an expression can have many adjustments, and we
- // are just ignoring those intermediate types. This is because
- // those intermediate values are always linearly "consumed" by
- // the other adjustments, and hence would never be directly
- // captured in the MIR.
- //
- // (Note that this partly relies on the fact that the `Deref`
- // traits always return references, which means their content
- // can be reborrowed without needing to spill to a temporary.
- // If this were not the case, then we could conceivably have
- // to create intermediate temporaries.)
- //
- // The type table might not have information for this expression
- // if it is in a malformed scope. (#66387)
- if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
- self.record(ty, expr.hir_id, scope, Some(expr), expr.span);
- } else {
- self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
- }
- }
-}
-
-#[derive(Default)]
-struct SuspendCheckData<'a, 'tcx> {
- expr: Option<&'tcx Expr<'tcx>>,
- source_span: Span,
- yield_span: Span,
- descr_pre: &'a str,
- descr_post: &'a str,
- plural_len: usize,
-}
-
-// Returns whether it emitted a diagnostic or not
-// Note that this fn and the following one are based on the code
-// for creating must_use diagnostics
-//
-// Note that this technique was chosen over things like a `Suspend` marker trait
-// as it is simpler and has precedent in the compiler
-fn check_must_not_suspend_ty<'tcx>(
- fcx: &FnCtxt<'_, 'tcx>,
- ty: Ty<'tcx>,
- hir_id: HirId,
- data: SuspendCheckData<'_, 'tcx>,
-) -> bool {
- if ty.is_unit()
-    // FIXME: should this check `Ty::is_inhabited_from`? This query is not available in this stage
- // of typeck (before ReVar and RePlaceholder are removed), but may remove noise, like in
- // `must_use`
- // || !ty.is_inhabited_from(fcx.tcx, fcx.tcx.parent_module(hir_id).to_def_id(), fcx.param_env)
- {
- return false;
- }
-
- let plural_suffix = pluralize!(data.plural_len);
-
- debug!("Checking must_not_suspend for {}", ty);
-
- match *ty.kind() {
- ty::Adt(..) if ty.is_box() => {
- let boxed_ty = ty.boxed_ty();
- let descr_pre = &format!("{}boxed ", data.descr_pre);
- check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
- }
- ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
- // FIXME: support adding the attribute to TAITs
- ty::Alias(ty::Opaque, ty::AliasTy { def_id: def, .. }) => {
- let mut has_emitted = false;
- for &(predicate, _) in fcx.tcx.explicit_item_bounds(def).skip_binder() {
- // We only look at the `DefId`, so it is safe to skip the binder here.
- if let ty::ClauseKind::Trait(ref poly_trait_predicate) =
- predicate.kind().skip_binder()
- {
- let def_id = poly_trait_predicate.trait_ref.def_id;
- let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix);
- if check_must_not_suspend_def(
- fcx.tcx,
- def_id,
- hir_id,
- SuspendCheckData { descr_pre, ..data },
- ) {
- has_emitted = true;
- break;
- }
- }
- }
- has_emitted
- }
- ty::Dynamic(binder, _, _) => {
- let mut has_emitted = false;
- for predicate in binder.iter() {
- if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
- let def_id = trait_ref.def_id;
- let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post);
- if check_must_not_suspend_def(
- fcx.tcx,
- def_id,
- hir_id,
- SuspendCheckData { descr_post, ..data },
- ) {
- has_emitted = true;
- break;
- }
- }
- }
- has_emitted
- }
- ty::Tuple(fields) => {
- let mut has_emitted = false;
- let comps = match data.expr.map(|e| &e.kind) {
- Some(hir::ExprKind::Tup(comps)) if comps.len() == fields.len() => Some(comps),
- _ => None,
- };
- for (i, ty) in fields.iter().enumerate() {
- let descr_post = &format!(" in tuple element {i}");
- let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span);
- if check_must_not_suspend_ty(
- fcx,
- ty,
- hir_id,
- SuspendCheckData {
- descr_post,
- expr: comps.and_then(|comps| comps.get(i)),
- source_span: span,
- ..data
- },
- ) {
- has_emitted = true;
- }
- }
- has_emitted
- }
- ty::Array(ty, len) => {
- let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix);
- check_must_not_suspend_ty(
- fcx,
- ty,
- hir_id,
- SuspendCheckData {
- descr_pre,
- plural_len: len.try_eval_target_usize(fcx.tcx, fcx.param_env).unwrap_or(0)
- as usize
- + 1,
- ..data
- },
- )
- }
- // If drop tracking is enabled, we want to look through references, since the referent
- // may not be considered live across the await point.
- ty::Ref(_region, ty, _mutability) if fcx.sess().opts.unstable_opts.drop_tracking => {
- let descr_pre = &format!("{}reference{} to ", data.descr_pre, plural_suffix);
- check_must_not_suspend_ty(fcx, ty, hir_id, SuspendCheckData { descr_pre, ..data })
- }
- _ => false,
- }
-}
-
-fn check_must_not_suspend_def(
- tcx: TyCtxt<'_>,
- def_id: DefId,
- hir_id: HirId,
- data: SuspendCheckData<'_, '_>,
-) -> bool {
- if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) {
- tcx.struct_span_lint_hir(
- rustc_session::lint::builtin::MUST_NOT_SUSPEND,
- hir_id,
- data.source_span,
- DelayDm(|| {
- format!(
- "{}`{}`{} held across a suspend point, but should not be",
- data.descr_pre,
- tcx.def_path_str(def_id),
- data.descr_post,
- )
- }),
- |lint| {
- // add span pointing to the offending yield/await
- lint.span_label(data.yield_span, "the value is held across this suspend point");
-
- // Add optional reason note
- if let Some(note) = attr.value_str() {
- // FIXME(guswynn): consider formatting this better
- lint.span_note(data.source_span, note.to_string());
- }
-
- // Add some quick suggestions on what to do
- // FIXME: can `drop` work as a suggestion here as well?
- lint.span_help(
- data.source_span,
- "consider using a block (`{ ... }`) \
- to shrink the value's scope, ending before the suspend point",
- );
-
- lint
- },
- );
-
- true
- } else {
- false
- }
-}
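
The helpers deleted above implement the `must_not_suspend` lint check over generator interiors. Independent of where that check lives after this refactor, the behaviour they encode looks like the following user-level sketch; it assumes a nightly toolchain, since the attribute is feature-gated and the lint is allow-by-default.

    // Minimal sketch of the lint the removed helpers drive. Nightly-only.
    #![feature(must_not_suspend)]
    #![deny(must_not_suspend)]

    #[must_not_suspend = "the guard should be released before suspending"]
    struct Guard;

    async fn tick() {}

    async fn task() {
        let g = Guard;   // the `descr_pre`/`descr_post` strings above decorate this value
        tick().await;    // lint: `Guard` held across this suspend point, but should not be
        drop(g);
        // Suggested fix (the `span_help` above): end the value's scope in a
        // block `{ ... }` before the suspend point.
    }
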
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs
index c4d3cbc9f..6873382c4 100644
--- a/compiler/rustc_hir_typeck/src/lib.rs
+++ b/compiler/rustc_hir_typeck/src/lib.rs
@@ -32,7 +32,6 @@ pub mod expr_use_visitor;
mod fallback;
mod fn_ctxt;
mod gather_locals;
-mod generator_interior;
mod inherited;
mod intrinsicck;
mod mem_categorization;
@@ -436,6 +435,12 @@ fn fatally_break_rust(tcx: TyCtxt<'_>) {
tcx.sess.cfg_version,
config::host_triple(),
));
+ if let Some((flags, excluded_cargo_defaults)) = rustc_session::utils::extra_compiler_flags() {
+ handler.note_without_error(format!("compiler flags: {}", flags.join(" ")));
+ if excluded_cargo_defaults {
+ handler.note_without_error("some of the compiler flags provided by cargo are hidden");
+ }
+ }
}
fn has_expected_num_generic_args(tcx: TyCtxt<'_>, trait_did: DefId, expected: usize) -> bool {
diff --git a/compiler/rustc_hir_typeck/src/mem_categorization.rs b/compiler/rustc_hir_typeck/src/mem_categorization.rs
index 9574da021..337d12b2d 100644
--- a/compiler/rustc_hir_typeck/src/mem_categorization.rs
+++ b/compiler/rustc_hir_typeck/src/mem_categorization.rs
@@ -557,10 +557,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
Ok(adt_def.variant_index_with_ctor_id(variant_ctor_id))
}
Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _)
- | Res::Def(
- DefKind::Struct | DefKind::Union | DefKind::TyAlias { .. } | DefKind::AssocTy,
- _,
- )
+ | Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
| Res::SelfCtor(..)
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. } => {
diff --git a/compiler/rustc_hir_typeck/src/method/mod.rs b/compiler/rustc_hir_typeck/src/method/mod.rs
index 6dd131aa2..86a0e95de 100644
--- a/compiler/rustc_hir_typeck/src/method/mod.rs
+++ b/compiler/rustc_hir_typeck/src/method/mod.rs
@@ -89,14 +89,13 @@ pub enum CandidateSource {
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
- /// Determines whether the type `self_ty` supports a method name `method_name` or not.
+ /// Determines whether the type `self_ty` supports a visible method named `method_name` or not.
#[instrument(level = "debug", skip(self))]
pub fn method_exists(
&self,
method_name: Ident,
self_ty: Ty<'tcx>,
call_expr_id: hir::HirId,
- allow_private: bool,
return_type: Option<Ty<'tcx>>,
) -> bool {
match self.probe_for_name(
@@ -118,7 +117,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
Err(NoMatch(..)) => false,
Err(Ambiguity(..)) => true,
- Err(PrivateMatch(..)) => allow_private,
+ Err(PrivateMatch(..)) => false,
Err(IllegalSizedBound { .. }) => true,
Err(BadReturnType) => false,
}
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index 72a04a02b..07c48ec63 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -2361,8 +2361,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
Some(output_ty) => self.resolve_vars_if_possible(output_ty),
_ => return,
};
- let method_exists =
- self.method_exists(item_name, output_ty, call.hir_id, true, return_type);
+ let method_exists = self.method_exists(item_name, output_ty, call.hir_id, return_type);
debug!("suggest_await_before_method: is_method_exist={}", method_exists);
if method_exists {
err.span_suggestion_verbose(
diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs
index 1a41786d2..4d6413903 100644
--- a/compiler/rustc_hir_typeck/src/upvar.rs
+++ b/compiler/rustc_hir_typeck/src/upvar.rs
@@ -41,6 +41,7 @@ use rustc_hir::intravisit::{self, Visitor};
use rustc_infer::infer::UpvarRegion;
use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind};
use rustc_middle::mir::FakeReadCause;
+use rustc_middle::traits::ObligationCauseCode;
use rustc_middle::ty::{
self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarArgs, UpvarCapture,
};
@@ -195,7 +196,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
assert_eq!(self.tcx.hir().body_owner_def_id(body.id()), closure_def_id);
let mut delegate = InferBorrowKind {
- fcx: self,
closure_def_id,
capture_information: Default::default(),
fake_reads: Default::default(),
@@ -296,6 +296,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
let final_upvar_tys = self.final_upvar_tys(closure_def_id);
debug!(?closure_hir_id, ?args, ?final_upvar_tys);
+ if self.tcx.features().unsized_locals || self.tcx.features().unsized_fn_params {
+ for capture in
+ self.typeck_results.borrow().closure_min_captures_flattened(closure_def_id)
+ {
+ if let UpvarCapture::ByValue = capture.info.capture_kind {
+ self.require_type_is_sized(
+ capture.place.ty(),
+ capture.get_path_span(self.tcx),
+ ObligationCauseCode::SizedClosureCapture(closure_def_id),
+ );
+ }
+ }
+ }
+
// Build a tuple (U0..Un) of the final upvar types U0..Un
// and unify the upvar tuple type in the closure with it:
let final_tupled_upvars_type = Ty::new_tup(self.tcx, &final_upvar_tys);
@@ -1607,34 +1621,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Truncate the capture so that the place being borrowed is in accordance with RFC 1240,
/// which states that it's unsafe to take a reference into a struct marked `repr(packed)`.
fn restrict_repr_packed_field_ref_capture<'tcx>(
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
mut place: Place<'tcx>,
mut curr_borrow_kind: ty::UpvarCapture,
) -> (Place<'tcx>, ty::UpvarCapture) {
let pos = place.projections.iter().enumerate().position(|(i, p)| {
let ty = place.ty_before_projection(i);
- // Return true for fields of packed structs, unless those fields have alignment 1.
+ // Return true for fields of packed structs.
match p.kind {
ProjectionKind::Field(..) => match ty.kind() {
ty::Adt(def, _) if def.repr().packed() => {
- // We erase regions here because they cannot be hashed
- match tcx.layout_of(param_env.and(tcx.erase_regions(p.ty))) {
- Ok(layout) if layout.align.abi.bytes() == 1 => {
- // if the alignment is 1, the type can't be further
- // disaligned.
- debug!(
- "restrict_repr_packed_field_ref_capture: ({:?}) - align = 1",
- place
- );
- false
- }
- _ => {
- debug!("restrict_repr_packed_field_ref_capture: ({:?}) - true", place);
- true
- }
- }
+ // We stop here regardless of field alignment. Field alignment can change as
+ // types change, including the types of private fields in other crates, and that
+ // shouldn't affect how we compute our captures.
+ true
}
_ => false,
@@ -1689,9 +1689,7 @@ fn drop_location_span(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> Span {
tcx.sess.source_map().end_point(owner_span)
}
-struct InferBorrowKind<'a, 'tcx> {
- fcx: &'a FnCtxt<'a, 'tcx>,
-
+struct InferBorrowKind<'tcx> {
// The def-id of the closure whose kind and upvar accesses are being inferred.
closure_def_id: LocalDefId,
@@ -1725,7 +1723,7 @@ struct InferBorrowKind<'a, 'tcx> {
fake_reads: Vec<(Place<'tcx>, FakeReadCause, hir::HirId)>,
}
-impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
+impl<'tcx> euv::Delegate<'tcx> for InferBorrowKind<'tcx> {
fn fake_read(
&mut self,
place: &PlaceWithHirId<'tcx>,
@@ -1740,12 +1738,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
let (place, _) = restrict_capture_precision(place.place.clone(), dummy_capture_kind);
- let (place, _) = restrict_repr_packed_field_ref_capture(
- self.fcx.tcx,
- self.fcx.param_env,
- place,
- dummy_capture_kind,
- );
+ let (place, _) = restrict_repr_packed_field_ref_capture(place, dummy_capture_kind);
self.fake_reads.push((place, cause, diag_expr_id));
}
@@ -1780,12 +1773,8 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
// We only want repr packed restriction to be applied to reading references into a packed
// struct, and not when the data is being moved. Therefore we call this method here instead
// of in `restrict_capture_precision`.
- let (place, mut capture_kind) = restrict_repr_packed_field_ref_capture(
- self.fcx.tcx,
- self.fcx.param_env,
- place_with_id.place.clone(),
- capture_kind,
- );
+ let (place, mut capture_kind) =
+ restrict_repr_packed_field_ref_capture(place_with_id.place.clone(), capture_kind);
// Raw pointers don't inherit mutability
if place_with_id.place.deref_tys().any(Ty::is_unsafe_ptr) {
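
The `restrict_repr_packed_field_ref_capture` change above widens any by-reference capture of a `repr(packed)` field to the whole struct, no longer special-casing fields whose alignment is 1, so capture analysis cannot be perturbed by (possibly private, cross-crate) field types changing. A user-level sketch of the underlying rule, independent of compiler internals:

    // Taking a reference into a packed struct may be unaligned, which is why a
    // closure borrows the whole struct here rather than a single field.
    #[repr(packed)]
    struct Packed {
        x: u32,
        y: u8,
    }

    fn main() {
        let p = Packed { x: 1, y: 2 };
        // let r = &p.x; // error[E0793]: reference to packed field is unaligned
        let closure = || {
            let x = p.x; // reading by value copies; the borrow is taken on `p` as a whole
            println!("{x}");
        };
        closure();
        let _ = p.y; // by-value read of the other field is likewise fine
    }
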
diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs
index 603681bbc..9c16b486d 100644
--- a/compiler/rustc_hir_typeck/src/writeback.rs
+++ b/compiler/rustc_hir_typeck/src/writeback.rs
@@ -46,7 +46,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
}
// Type only exists for constants and statics, not functions.
match self.tcx.hir().body_owner_kind(item_def_id) {
- hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => {
+ hir::BodyOwnerKind::Const { .. } | hir::BodyOwnerKind::Static(_) => {
let item_hir_id = self.tcx.hir().local_def_id_to_hir_id(item_def_id);
wbcx.visit_node_id(body.value.span, item_hir_id);
}
@@ -63,7 +63,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
wbcx.visit_coercion_casts();
wbcx.visit_user_provided_tys();
wbcx.visit_user_provided_sigs();
- wbcx.visit_generator_interior_types();
+ wbcx.visit_generator_interior();
wbcx.visit_offset_of_container_types();
wbcx.typeck_results.rvalue_scopes =
@@ -538,11 +538,9 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
);
}
- fn visit_generator_interior_types(&mut self) {
+ fn visit_generator_interior(&mut self) {
let fcx_typeck_results = self.fcx.typeck_results.borrow();
assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
- self.typeck_results.generator_interior_types =
- fcx_typeck_results.generator_interior_types.clone();
self.tcx().with_stable_hashing_context(move |ref hcx| {
for (&expr_def_id, predicates) in
fcx_typeck_results.generator_interior_predicates.to_sorted(hcx, false).into_iter()
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
index 5e7ae3ecd..1b160eca9 100644
--- a/compiler/rustc_incremental/src/assert_dep_graph.rs
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -42,7 +42,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::{self, Visitor};
use rustc_middle::dep_graph::{
- DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
+ dep_kinds, DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
};
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::TyCtxt;
@@ -129,7 +129,7 @@ impl<'tcx> IfThisChanged<'tcx> {
let dep_node_interned = self.argument(attr);
let dep_node = match dep_node_interned {
None => {
- DepNode::from_def_path_hash(self.tcx, def_path_hash, DepKind::hir_owner)
+ DepNode::from_def_path_hash(self.tcx, def_path_hash, dep_kinds::hir_owner)
}
Some(n) => {
match DepNode::from_label_string(self.tcx, n.as_str(), def_path_hash) {
diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs
index b9171fad5..220ea194a 100644
--- a/compiler/rustc_incremental/src/lib.rs
+++ b/compiler/rustc_incremental/src/lib.rs
@@ -28,8 +28,8 @@ pub use persist::load_query_result_cache;
pub use persist::prepare_session_directory;
pub use persist::save_dep_graph;
pub use persist::save_work_product_index;
+pub use persist::setup_dep_graph;
pub use persist::LoadResult;
-pub use persist::{build_dep_graph, load_dep_graph, DepGraphFuture};
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
index 8d67f6925..2310d0b12 100644
--- a/compiler/rustc_incremental/src/persist/load.rs
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -3,17 +3,19 @@
use crate::errors;
use rustc_data_structures::memmap::Mmap;
use rustc_data_structures::unord::UnordMap;
-use rustc_middle::dep_graph::{SerializedDepGraph, WorkProductMap};
+use rustc_middle::dep_graph::{DepGraph, DepsType, SerializedDepGraph, WorkProductMap};
use rustc_middle::query::on_disk_cache::OnDiskCache;
use rustc_serialize::opaque::MemDecoder;
use rustc_serialize::Decodable;
use rustc_session::config::IncrementalStateAssertion;
-use rustc_session::Session;
+use rustc_session::{Session, StableCrateId};
+use rustc_span::{ErrorGuaranteed, Symbol};
use std::path::{Path, PathBuf};
use super::data::*;
use super::file_format;
use super::fs::*;
+use super::save::build_dep_graph;
use super::work_product;
#[derive(Debug)]
@@ -72,21 +74,12 @@ impl<T: Default> LoadResult<T> {
}
fn load_data(path: &Path, sess: &Session) -> LoadResult<(Mmap, usize)> {
- load_data_no_sess(
+ match file_format::read_file(
path,
sess.opts.unstable_opts.incremental_info,
sess.is_nightly_build(),
sess.cfg_version,
- )
-}
-
-fn load_data_no_sess(
- path: &Path,
- report_incremental_info: bool,
- is_nightly_build: bool,
- cfg_version: &'static str,
-) -> LoadResult<(Mmap, usize)> {
- match file_format::read_file(path, report_incremental_info, is_nightly_build, cfg_version) {
+ ) {
Ok(Some(data_and_pos)) => LoadResult::Ok { data: data_and_pos },
Ok(None) => {
// The file either didn't exist or was produced by an incompatible
@@ -102,39 +95,12 @@ fn delete_dirty_work_product(sess: &Session, swp: SerializedWorkProduct) {
work_product::delete_workproduct_files(sess, &swp.work_product);
}
-/// Either a result that has already be computed or a
-/// handle that will let us wait until it is computed
-/// by a background thread.
-pub enum MaybeAsync<T> {
- Sync(T),
- Async(std::thread::JoinHandle<T>),
-}
-
-impl<T> MaybeAsync<LoadResult<T>> {
- /// Accesses the data returned in [`LoadResult::Ok`] in an asynchronous way if possible.
- pub fn open(self) -> LoadResult<T> {
- match self {
- MaybeAsync::Sync(result) => result,
- MaybeAsync::Async(handle) => {
- handle.join().unwrap_or_else(|e| LoadResult::DecodeIncrCache(e))
- }
- }
- }
-}
-
-/// An asynchronous type for computing the dependency graph.
-pub type DepGraphFuture = MaybeAsync<LoadResult<(SerializedDepGraph, WorkProductMap)>>;
-
-/// Launch a thread and load the dependency graph in the background.
-pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
- // Since `sess` isn't `Sync`, we perform all accesses to `sess`
- // before we fire the background thread.
-
+fn load_dep_graph(sess: &Session) -> LoadResult<(SerializedDepGraph, WorkProductMap)> {
let prof = sess.prof.clone();
if sess.opts.incremental.is_none() {
// No incremental compilation.
- return MaybeAsync::Sync(LoadResult::Ok { data: Default::default() });
+ return LoadResult::Ok { data: Default::default() };
}
let _timer = sess.prof.generic_activity("incr_comp_prepare_load_dep_graph");
@@ -142,7 +108,6 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
// Calling `sess.incr_comp_session_dir()` will panic if `sess.opts.incremental.is_none()`.
// Fortunately, we just checked that this isn't the case.
let path = dep_graph_path(&sess);
- let report_incremental_info = sess.opts.unstable_opts.incremental_info;
let expected_hash = sess.opts.dep_tracking_hash(false);
let mut prev_work_products = UnordMap::default();
@@ -180,40 +145,35 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
}
}
- let is_nightly_build = sess.is_nightly_build();
- let cfg_version = sess.cfg_version;
-
- MaybeAsync::Async(std::thread::spawn(move || {
- let _prof_timer = prof.generic_activity("incr_comp_load_dep_graph");
+ let _prof_timer = prof.generic_activity("incr_comp_load_dep_graph");
- match load_data_no_sess(&path, report_incremental_info, is_nightly_build, cfg_version) {
- LoadResult::DataOutOfDate => LoadResult::DataOutOfDate,
- LoadResult::LoadDepGraph(path, err) => LoadResult::LoadDepGraph(path, err),
- LoadResult::DecodeIncrCache(err) => LoadResult::DecodeIncrCache(err),
- LoadResult::Ok { data: (bytes, start_pos) } => {
- let mut decoder = MemDecoder::new(&bytes, start_pos);
- let prev_commandline_args_hash = u64::decode(&mut decoder);
+ match load_data(&path, sess) {
+ LoadResult::DataOutOfDate => LoadResult::DataOutOfDate,
+ LoadResult::LoadDepGraph(path, err) => LoadResult::LoadDepGraph(path, err),
+ LoadResult::DecodeIncrCache(err) => LoadResult::DecodeIncrCache(err),
+ LoadResult::Ok { data: (bytes, start_pos) } => {
+ let mut decoder = MemDecoder::new(&bytes, start_pos);
+ let prev_commandline_args_hash = u64::decode(&mut decoder);
- if prev_commandline_args_hash != expected_hash {
- if report_incremental_info {
- eprintln!(
- "[incremental] completely ignoring cache because of \
+ if prev_commandline_args_hash != expected_hash {
+ if sess.opts.unstable_opts.incremental_info {
+ eprintln!(
+ "[incremental] completely ignoring cache because of \
differing commandline arguments"
- );
- }
- // We can't reuse the cache, purge it.
- debug!("load_dep_graph_new: differing commandline arg hashes");
-
- // No need to do any further work
- return LoadResult::DataOutOfDate;
+ );
}
+ // We can't reuse the cache, purge it.
+ debug!("load_dep_graph_new: differing commandline arg hashes");
- let dep_graph = SerializedDepGraph::decode(&mut decoder);
-
- LoadResult::Ok { data: (dep_graph, prev_work_products) }
+ // No need to do any further work
+ return LoadResult::DataOutOfDate;
}
+
+ let dep_graph = SerializedDepGraph::decode::<DepsType>(&mut decoder);
+
+ LoadResult::Ok { data: (dep_graph, prev_work_products) }
}
- }))
+ }
}
/// Attempts to load the query result cache from disk
@@ -235,3 +195,35 @@ pub fn load_query_result_cache(sess: &Session) -> Option<OnDiskCache<'_>> {
_ => Some(OnDiskCache::new_empty(sess.source_map())),
}
}
+
+/// Sets up the dependency graph by loading an existing graph from disk and sets up streaming of a
+/// new graph to an incremental session directory.
+pub fn setup_dep_graph(
+ sess: &Session,
+ crate_name: Symbol,
+ stable_crate_id: StableCrateId,
+) -> Result<DepGraph, ErrorGuaranteed> {
+ // `load_dep_graph` can only be called after `prepare_session_directory`.
+ prepare_session_directory(sess, crate_name, stable_crate_id)?;
+
+ let res = sess.opts.build_dep_graph().then(|| load_dep_graph(sess));
+
+ if sess.opts.incremental.is_some() {
+ sess.time("incr_comp_garbage_collect_session_directories", || {
+ if let Err(e) = garbage_collect_session_directories(sess) {
+ warn!(
+ "Error while trying to garbage collect incremental \
+ compilation cache directory: {}",
+ e
+ );
+ }
+ });
+ }
+
+ Ok(res
+ .and_then(|result| {
+ let (prev_graph, prev_work_products) = result.open(sess);
+ build_dep_graph(sess, prev_graph, prev_work_products)
+ })
+ .unwrap_or_else(DepGraph::new_disabled))
+}
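
The new `setup_dep_graph` entry point folds the old `load_dep_graph`/`DepGraphFuture` dance and `build_dep_graph` into one synchronous call. A hedged sketch of the caller side; the wrapper function itself is an illustrative assumption, while the signature and types are the ones added above.

    // Sketch only: how a driver might obtain the dep graph now. Everything except
    // `rustc_incremental::setup_dep_graph` and the types in its signature is assumed.
    fn dep_graph_for_crate(
        sess: &rustc_session::Session,
        crate_name: rustc_span::Symbol,
        stable_crate_id: rustc_session::StableCrateId,
    ) -> Result<rustc_middle::dep_graph::DepGraph, rustc_span::ErrorGuaranteed> {
        // Prepares the session directory, loads any previous graph, garbage-collects
        // stale session directories, and starts streaming the new graph; returns a
        // disabled graph when incremental compilation is off.
        rustc_incremental::setup_dep_graph(sess, crate_name, stable_crate_id)
    }
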
diff --git a/compiler/rustc_incremental/src/persist/mod.rs b/compiler/rustc_incremental/src/persist/mod.rs
index 1336189bc..fdecaca5a 100644
--- a/compiler/rustc_incremental/src/persist/mod.rs
+++ b/compiler/rustc_incremental/src/persist/mod.rs
@@ -16,9 +16,8 @@ pub use fs::in_incr_comp_dir;
pub use fs::in_incr_comp_dir_sess;
pub use fs::prepare_session_directory;
pub use load::load_query_result_cache;
+pub use load::setup_dep_graph;
pub use load::LoadResult;
-pub use load::{load_dep_graph, DepGraphFuture};
-pub use save::build_dep_graph;
pub use save::save_dep_graph;
pub use save::save_work_product_index;
pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir;
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index 0cfaf5837..210da751d 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -48,18 +48,6 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
join(
move || {
- sess.time("incr_comp_persist_result_cache", || {
- // Drop the memory map so that we can remove the file and write to it.
- if let Some(odc) = &tcx.query_system.on_disk_cache {
- odc.drop_serialized_data(tcx);
- }
-
- file_format::save_in(sess, query_cache_path, "query cache", |e| {
- encode_query_cache(tcx, e)
- });
- });
- },
- move || {
sess.time("incr_comp_persist_dep_graph", || {
if let Err(err) = tcx.dep_graph.encode(&tcx.sess.prof) {
sess.emit_err(errors::WriteDepGraph { path: &staging_dep_graph_path, err });
@@ -73,6 +61,20 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
}
});
},
+ move || {
+ // We execute this after `incr_comp_persist_dep_graph` for the serial compiler
+ // to catch any potential query execution writing to the dep graph.
+ sess.time("incr_comp_persist_result_cache", || {
+ // Drop the memory map so that we can remove the file and write to it.
+ if let Some(odc) = &tcx.query_system.on_disk_cache {
+ odc.drop_serialized_data(tcx);
+ }
+
+ file_format::save_in(sess, query_cache_path, "query cache", |e| {
+ encode_query_cache(tcx, e)
+ });
+ });
+ },
);
})
}
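
The reordering above leans on the fact that, in the non-parallel compiler, `join` runs its two closures in order, so the dep graph is persisted before query-cache encoding, catching any dep-graph writes made by queries that still execute during encoding. A simplified, hypothetical stand-in illustrating that ordering (the real `join` lives in `rustc_data_structures::sync` and may run the closures in parallel):

    // Serial stand-in for `join`: the first closure finishes before the second
    // starts, which is the property the comment in the hunk above relies on.
    fn join<RA, RB>(a: impl FnOnce() -> RA, b: impl FnOnce() -> RB) -> (RA, RB) {
        let ra = a(); // persist the dep graph first
        let rb = b(); // then encode the query cache, which may still execute queries
        (ra, rb)
    }

    fn main() {
        join(
            || println!("incr_comp_persist_dep_graph"),
            || println!("incr_comp_persist_result_cache"),
        );
    }
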
@@ -145,7 +147,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
/// execution, the new dependency information is not kept in memory but directly
/// output to this file. `save_dep_graph` then finalizes the staging dep-graph
/// and moves it to the permanent dep-graph path
-pub fn build_dep_graph(
+pub(crate) fn build_dep_graph(
sess: &Session,
prev_graph: SerializedDepGraph,
prev_work_products: WorkProductMap,
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
index 9942c70c4..061c55c01 100644
--- a/compiler/rustc_index/src/lib.rs
+++ b/compiler/rustc_index/src/lib.rs
@@ -12,7 +12,7 @@
test
)
)]
-#![cfg_attr(all(not(bootstrap), feature = "nightly"), allow(internal_features))]
+#![cfg_attr(feature = "nightly", allow(internal_features))]
#[cfg(feature = "nightly")]
pub mod bit_set;
@@ -29,6 +29,18 @@ pub use {idx::Idx, slice::IndexSlice, vec::IndexVec};
pub use rustc_macros::newtype_index;
/// Type size assertion. The first argument is a type and the second argument is its expected size.
+///
+/// <div class="warning">
+///
+/// Emitting hard errors from size assertions like this is generally not
+/// recommended, especially in libraries, because they can cause build failures if the layout
+/// algorithm or dependencies change. Here in rustc we control the toolchain and layout algorithm,
+/// so the former is not a problem. For the latter we have a lockfile as rustc is an application and
+/// precompiled library.
+///
+/// Short version: Don't copy this macro into your own code. Use a `#[test]` instead.
+///
+/// </div>
#[macro_export]
macro_rules! static_assert_size {
($ty:ty, $size:expr) => {
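
The new doc comment advises code outside rustc to prefer a `#[test]` over a hard compile-time size assertion. A minimal example of that alternative; the concrete type and expected size are only illustrative.

    // Recommended pattern for downstream code: assert layout in a test, so a
    // layout change breaks CI rather than every downstream build.
    #[cfg(test)]
    mod layout_tests {
        #[test]
        fn option_box_stays_pointer_sized() {
            // Niche optimization: `Option<Box<u32>>` uses the null pointer as `None`.
            assert_eq!(
                std::mem::size_of::<Option<Box<u32>>>(),
                std::mem::size_of::<usize>(),
            );
        }
    }
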
diff --git a/compiler/rustc_infer/messages.ftl b/compiler/rustc_infer/messages.ftl
index 4d0e77063..46558997f 100644
--- a/compiler/rustc_infer/messages.ftl
+++ b/compiler/rustc_infer/messages.ftl
@@ -198,6 +198,10 @@ infer_nothing = {""}
infer_oc_cant_coerce = cannot coerce intrinsics to function pointers
infer_oc_closure_selfref = closure/generator type that references itself
infer_oc_const_compat = const not compatible with trait
+infer_oc_fn_lang_correct_type = {$lang_item_name ->
+ [panic_impl] `#[panic_handler]`
+ *[lang_item_name] lang item `{$lang_item_name}`
+ } function has wrong type
infer_oc_fn_main_correct_type = `main` function has wrong type
infer_oc_fn_start_correct_type = `#[start]` function has wrong type
infer_oc_generic = mismatched types
@@ -337,6 +341,7 @@ infer_subtype = ...so that the {$requirement ->
[no_else] `if` missing an `else` returns `()`
[fn_main_correct_type] `main` function has the correct type
[fn_start_correct_type] `#[start]` function has the correct type
+ [fn_lang_correct_type] lang item function has the correct type
[intrinsic_correct_type] intrinsic has the correct type
[method_correct_type] method receiver has the correct type
*[other] types are compatible
@@ -350,6 +355,7 @@ infer_subtype_2 = ...so that {$requirement ->
[no_else] `if` missing an `else` returns `()`
[fn_main_correct_type] `main` function has the correct type
[fn_start_correct_type] `#[start]` function has the correct type
+ [fn_lang_correct_type] lang item function has the correct type
[intrinsic_correct_type] intrinsic has the correct type
[method_correct_type] method receiver has the correct type
*[other] types are compatible
diff --git a/compiler/rustc_infer/src/errors/mod.rs b/compiler/rustc_infer/src/errors/mod.rs
index a7e045e1e..ad4525c92 100644
--- a/compiler/rustc_infer/src/errors/mod.rs
+++ b/compiler/rustc_infer/src/errors/mod.rs
@@ -14,8 +14,7 @@ use rustc_span::{symbol::Ident, BytePos, Span};
use crate::fluent_generated as fluent;
use crate::infer::error_reporting::{
- need_type_info::{GeneratorKindAsDiagArg, UnderspecifiedArgKind},
- nice_region_error::placeholder_error::Highlighted,
+ need_type_info::UnderspecifiedArgKind, nice_region_error::placeholder_error::Highlighted,
ObligationCauseAsDiagArg,
};
@@ -86,16 +85,6 @@ pub struct AmbiguousReturn<'a> {
pub multi_suggestions: Vec<SourceKindMultiSuggestion<'a>>,
}
-#[derive(Diagnostic)]
-#[diag(infer_need_type_info_in_generator, code = "E0698")]
-pub struct NeedTypeInfoInGenerator<'a> {
- #[primary_span]
- pub span: Span,
- pub generator_kind: GeneratorKindAsDiagArg,
- #[subdiagnostic]
- pub bad_label: InferenceBadError<'a>,
-}
-
// Used when a better one isn't available
#[derive(Subdiagnostic)]
#[label(infer_label_bad)]
@@ -1463,6 +1452,14 @@ pub enum ObligationCauseFailureCode {
#[subdiagnostic]
subdiags: Vec<TypeErrorAdditionalDiags>,
},
+ #[diag(infer_oc_fn_lang_correct_type, code = "E0308")]
+ FnLangCorrectType {
+ #[primary_span]
+ span: Span,
+ #[subdiagnostic]
+ subdiags: Vec<TypeErrorAdditionalDiags>,
+ lang_item_name: Symbol,
+ },
#[diag(infer_oc_intrinsic_correct_type, code = "E0308")]
IntrinsicCorrectType {
#[primary_span]
diff --git a/compiler/rustc_infer/src/errors/note_and_explain.rs b/compiler/rustc_infer/src/errors/note_and_explain.rs
index bd168f047..9276bb0a7 100644
--- a/compiler/rustc_infer/src/errors/note_and_explain.rs
+++ b/compiler/rustc_infer/src/errors/note_and_explain.rs
@@ -56,11 +56,8 @@ impl<'a> DescriptionCtx<'a> {
(Some(span), "as_defined", name.to_string())
}
}
- ty::BrAnon(span) => {
- let span = match span {
- Some(_) => span,
- None => Some(tcx.def_span(scope)),
- };
+ ty::BrAnon => {
+ let span = Some(tcx.def_span(scope));
(span, "defined_here", String::new())
}
_ => {
diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs
index 6d5db3336..2797d0797 100644
--- a/compiler/rustc_infer/src/infer/at.rs
+++ b/compiler/rustc_infer/src/infer/at.rs
@@ -478,7 +478,28 @@ impl<'tcx> ToTrace<'tcx> for ty::FnSig<'tcx> {
a: Self,
b: Self,
) -> TypeTrace<'tcx> {
- TypeTrace { cause: cause.clone(), values: Sigs(ExpectedFound::new(a_is_expected, a, b)) }
+ TypeTrace {
+ cause: cause.clone(),
+ values: PolySigs(ExpectedFound::new(
+ a_is_expected,
+ ty::Binder::dummy(a),
+ ty::Binder::dummy(b),
+ )),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::PolyFnSig<'tcx> {
+ fn to_trace(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: PolySigs(ExpectedFound::new(a_is_expected, a, b)),
+ }
}
}
diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
index 9d7a9fefd..4124c9ead 100644
--- a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
+++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
@@ -459,7 +459,6 @@ impl<'cx, 'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'cx, 'tcx> {
ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Bool
| ty::Char
| ty::Int(..)
@@ -522,6 +521,17 @@ impl<'cx, 'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'cx, 'tcx> {
}
}
}
+ ty::ConstKind::Infer(InferConst::EffectVar(vid)) => {
+ match self.infcx.probe_effect_var(vid) {
+ Some(value) => return self.fold_const(value.as_const(self.infcx.tcx)),
+ None => {
+ return self.canonicalize_const_var(
+ CanonicalVarInfo { kind: CanonicalVarKind::Effect },
+ ct,
+ );
+ }
+ }
+ }
ty::ConstKind::Infer(InferConst::Fresh(_)) => {
bug!("encountered a fresh const during canonicalization")
}
@@ -690,7 +700,8 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> {
.iter()
.map(|v| CanonicalVarInfo {
kind: match v.kind {
- CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float) => {
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float)
+ | CanonicalVarKind::Effect => {
return *v;
}
CanonicalVarKind::Ty(CanonicalTyVarKind::General(u)) => {
@@ -764,7 +775,7 @@ impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> {
r: ty::Region<'tcx>,
) -> ty::Region<'tcx> {
let var = self.canonical_var(info, r.into());
- let br = ty::BoundRegion { var, kind: ty::BrAnon(None) };
+ let br = ty::BoundRegion { var, kind: ty::BrAnon };
ty::Region::new_late_bound(self.interner(), self.binder_index, br)
}
diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs
index 8ca2e4030..41787ee29 100644
--- a/compiler/rustc_infer/src/infer/canonical/mod.rs
+++ b/compiler/rustc_infer/src/infer/canonical/mod.rs
@@ -151,7 +151,11 @@ impl<'tcx> InferCtxt<'tcx> {
universe_map(ui),
)
.into(),
-
+ CanonicalVarKind::Effect => {
+ let vid = self.inner.borrow_mut().effect_unification_table().new_key(None);
+ ty::Const::new_infer(self.tcx, ty::InferConst::EffectVar(vid), self.tcx.types.bool)
+ .into()
+ }
CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, bound }, ty) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, bound };
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
index ddc8e7e50..ee13eb027 100644
--- a/compiler/rustc_infer/src/infer/combine.rs
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -30,7 +30,7 @@ use super::{DefineOpaqueTypes, InferCtxt, TypeTrace};
use crate::infer::generalize::{self, CombineDelegate, Generalization};
use crate::traits::{Obligation, PredicateObligations};
use rustc_middle::infer::canonical::OriginalQueryValues;
-use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
+use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue, EffectVarValue};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::relate::{RelateResult, TypeRelation};
@@ -91,7 +91,7 @@ impl<'tcx> InferCtxt<'tcx> {
.borrow_mut()
.float_unification_table()
.unify_var_var(a_id, b_id)
- .map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
+ .map_err(|e| float_unification_error(a_is_expected, e))?;
Ok(a)
}
(&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
@@ -210,10 +210,30 @@ impl<'tcx> InferCtxt<'tcx> {
return Ok(a);
}
+ (
+ ty::ConstKind::Infer(InferConst::EffectVar(a_vid)),
+ ty::ConstKind::Infer(InferConst::EffectVar(b_vid)),
+ ) => {
+ self.inner
+ .borrow_mut()
+ .effect_unification_table()
+ .unify_var_var(a_vid, b_vid)
+ .map_err(|a| effect_unification_error(self.tcx, relation.a_is_expected(), a))?;
+ return Ok(a);
+ }
+
// All other cases of inference with other variables are errors.
- (ty::ConstKind::Infer(InferConst::Var(_)), ty::ConstKind::Infer(_))
- | (ty::ConstKind::Infer(_), ty::ConstKind::Infer(InferConst::Var(_))) => {
- bug!("tried to combine ConstKind::Infer/ConstKind::Infer(InferConst::Var)")
+ (
+ ty::ConstKind::Infer(InferConst::Var(_) | InferConst::EffectVar(_)),
+ ty::ConstKind::Infer(_),
+ )
+ | (
+ ty::ConstKind::Infer(_),
+ ty::ConstKind::Infer(InferConst::Var(_) | InferConst::EffectVar(_)),
+ ) => {
+ bug!(
+ "tried to combine ConstKind::Infer/ConstKind::Infer(InferConst::Var): {a:?} and {b:?}"
+ )
}
(ty::ConstKind::Infer(InferConst::Var(vid)), _) => {
@@ -223,6 +243,23 @@ impl<'tcx> InferCtxt<'tcx> {
(_, ty::ConstKind::Infer(InferConst::Var(vid))) => {
return self.unify_const_variable(vid, a, relation.param_env());
}
+
+ (ty::ConstKind::Infer(InferConst::EffectVar(vid)), _) => {
+ return self.unify_effect_variable(
+ relation.a_is_expected(),
+ vid,
+ EffectVarValue::Const(b),
+ );
+ }
+
+ (_, ty::ConstKind::Infer(InferConst::EffectVar(vid))) => {
+ return self.unify_effect_variable(
+ !relation.a_is_expected(),
+ vid,
+ EffectVarValue::Const(a),
+ );
+ }
+
(ty::ConstKind::Unevaluated(..), _) | (_, ty::ConstKind::Unevaluated(..))
if self.tcx.features().generic_const_exprs || self.next_trait_solver() =>
{
@@ -340,6 +377,20 @@ impl<'tcx> InferCtxt<'tcx> {
.map_err(|e| float_unification_error(vid_is_expected, e))?;
Ok(Ty::new_float(self.tcx, val))
}
+
+ fn unify_effect_variable(
+ &self,
+ vid_is_expected: bool,
+ vid: ty::EffectVid<'tcx>,
+ val: EffectVarValue<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ self.inner
+ .borrow_mut()
+ .effect_unification_table()
+ .unify_var_value(vid, Some(val))
+ .map_err(|e| effect_unification_error(self.tcx, vid_is_expected, e))?;
+ Ok(val.as_const(self.tcx))
+ }
}
impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
@@ -493,3 +544,11 @@ fn float_unification_error<'tcx>(
let (ty::FloatVarValue(a), ty::FloatVarValue(b)) = v;
TypeError::FloatMismatch(ExpectedFound::new(a_is_expected, a, b))
}
+
+fn effect_unification_error<'tcx>(
+ _tcx: TyCtxt<'tcx>,
+ _a_is_expected: bool,
+ (_a, _b): (EffectVarValue<'tcx>, EffectVarValue<'tcx>),
+) -> TypeError<'tcx> {
+ bug!("unexpected effect unification error")
+}
diff --git a/compiler/rustc_infer/src/infer/equate.rs b/compiler/rustc_infer/src/infer/equate.rs
index 1dbab48fd..665297da2 100644
--- a/compiler/rustc_infer/src/infer/equate.rs
+++ b/compiler/rustc_infer/src/infer/equate.rs
@@ -119,26 +119,6 @@ impl<'tcx> TypeRelation<'tcx> for Equate<'_, '_, 'tcx> {
.obligations,
);
}
- // Optimization of GeneratorWitness relation since we know that all
- // free regions are replaced with bound regions during construction.
- // This greatly speeds up equating of GeneratorWitness.
- (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
- let a_types = infcx.tcx.anonymize_bound_vars(a_types);
- let b_types = infcx.tcx.anonymize_bound_vars(b_types);
- if a_types.bound_vars() == b_types.bound_vars() {
- let (a_types, b_types) = infcx.instantiate_binder_with_placeholders(
- a_types.map_bound(|a_types| (a_types, b_types.skip_binder())),
- );
- for (a, b) in std::iter::zip(a_types, b_types) {
- self.relate(a, b)?;
- }
- } else {
- return Err(ty::error::TypeError::Sorts(ty::relate::expected_found(
- self, a, b,
- )));
- }
- }
-
_ => {
self.fields.infcx.super_combine_tys(self, a, b)?;
}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
index ac5468f3d..72cfc1337 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -242,12 +242,9 @@ fn msg_span_from_named_region<'tcx>(
};
(text, Some(span))
}
- ty::BrAnon(span) => (
+ ty::BrAnon => (
"the anonymous lifetime as defined here".to_string(),
- Some(match span {
- Some(span) => span,
- None => tcx.def_span(scope)
- })
+ Some(tcx.def_span(scope))
),
_ => (
format!("the lifetime `{region}` as defined here"),
@@ -262,11 +259,7 @@ fn msg_span_from_named_region<'tcx>(
..
}) => (format!("the lifetime `{name}` as defined here"), Some(tcx.def_span(def_id))),
ty::RePlaceholder(ty::PlaceholderRegion {
- bound: ty::BoundRegion { kind: ty::BoundRegionKind::BrAnon(Some(span)), .. },
- ..
- }) => ("the anonymous lifetime defined here".to_owned(), Some(span)),
- ty::RePlaceholder(ty::PlaceholderRegion {
- bound: ty::BoundRegion { kind: ty::BoundRegionKind::BrAnon(None), .. },
+ bound: ty::BoundRegion { kind: ty::BoundRegionKind::BrAnon, .. },
..
}) => ("an anonymous lifetime".to_owned(), None),
_ => bug!("{:?}", region),
@@ -1616,7 +1609,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
// | expected `()`, found closure
// |
// = note: expected unit type `()`
- // found closure `[closure@$DIR/issue-20862.rs:2:5: 2:14 x:_]`
+ // found closure `{closure@$DIR/issue-20862.rs:2:5: 2:14 x:_}`
//
// Also ignore opaque `Future`s that come from async fns.
if !self.ignore_span.overlaps(span)
@@ -1642,8 +1635,8 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
ValuePairs::Terms(infer::ExpectedFound { expected, found }) => {
match (expected.unpack(), found.unpack()) {
(ty::TermKind::Ty(expected), ty::TermKind::Ty(found)) => {
- let is_simple_err =
- expected.is_simple_text() && found.is_simple_text();
+ let is_simple_err = expected.is_simple_text(self.tcx)
+ && found.is_simple_text(self.tcx);
OpaqueTypesVisitor::visit_expected_found(
self.tcx, expected, found, span,
)
@@ -1660,7 +1653,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
_ => (false, Mismatch::Fixed("type")),
}
}
- ValuePairs::Sigs(infer::ExpectedFound { expected, found }) => {
+ ValuePairs::PolySigs(infer::ExpectedFound { expected, found }) => {
OpaqueTypesVisitor::visit_expected_found(self.tcx, expected, found, span)
.report(diag);
(false, Mismatch::Fixed("signature"))
@@ -1885,7 +1878,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
s
};
- if !(values.expected.is_simple_text() && values.found.is_simple_text())
+ if !(values.expected.is_simple_text(self.tcx) && values.found.is_simple_text(self.tcx))
|| (exp_found.is_some_and(|ef| {
// This happens when the type error is a subset of the expectation,
// like when you have two references but one is `usize` and the other
@@ -2232,15 +2225,12 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
ret => ret,
}
}
- infer::Sigs(exp_found) => {
+ infer::PolySigs(exp_found) => {
let exp_found = self.resolve_vars_if_possible(exp_found);
if exp_found.references_error() {
return None;
}
- let (exp, fnd) = self.cmp_fn_sig(
- &ty::Binder::dummy(exp_found.expected),
- &ty::Binder::dummy(exp_found.found),
- );
+ let (exp, fnd) = self.cmp_fn_sig(&exp_found.expected, &exp_found.found);
Some((exp, fnd, None, None))
}
}
@@ -2927,6 +2917,7 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
| IfExpression { .. }
| LetElse
| StartFunctionType
+ | LangFunctionType(_)
| IntrinsicType
| MethodReceiver => Error0308,
@@ -2971,6 +2962,9 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
LetElse => ObligationCauseFailureCode::NoDiverge { span, subdiags },
MainFunctionType => ObligationCauseFailureCode::FnMainCorrectType { span },
StartFunctionType => ObligationCauseFailureCode::FnStartCorrectType { span, subdiags },
+ &LangFunctionType(lang_item_name) => {
+ ObligationCauseFailureCode::FnLangCorrectType { span, subdiags, lang_item_name }
+ }
IntrinsicType => ObligationCauseFailureCode::IntrinsicCorrectType { span, subdiags },
MethodReceiver => ObligationCauseFailureCode::MethodCorrectType { span, subdiags },
@@ -3006,6 +3000,7 @@ impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
IfExpressionWithNoElse => "`if` missing an `else` returns `()`",
MainFunctionType => "`main` function has the correct type",
StartFunctionType => "`#[start]` function has the correct type",
+ LangFunctionType(_) => "lang item function has the correct type",
IntrinsicType => "intrinsic has the correct type",
MethodReceiver => "method receiver has the correct type",
_ => "types are compatible",
@@ -3028,6 +3023,7 @@ impl IntoDiagnosticArg for ObligationCauseAsDiagArg<'_> {
IfExpressionWithNoElse => "no_else",
MainFunctionType => "fn_main_correct_type",
StartFunctionType => "fn_start_correct_type",
+ LangFunctionType(_) => "fn_lang_correct_type",
IntrinsicType => "intrinsic_correct_type",
MethodReceiver => "method_correct_type",
_ => "other",
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
index f2a3c47bd..a9029a8ce 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -1,5 +1,5 @@
use crate::errors::{
- AmbiguousImpl, AmbiguousReturn, AnnotationRequired, InferenceBadError, NeedTypeInfoInGenerator,
+ AmbiguousImpl, AmbiguousReturn, AnnotationRequired, InferenceBadError,
SourceKindMultiSuggestion, SourceKindSubdiag,
};
use crate::infer::error_reporting::TypeErrCtxt;
@@ -595,39 +595,6 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
}
}
-impl<'tcx> InferCtxt<'tcx> {
- pub fn need_type_info_err_in_generator(
- &self,
- kind: hir::GeneratorKind,
- span: Span,
- ty: ty::Term<'tcx>,
- ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let ty = self.resolve_vars_if_possible(ty);
- let data = self.extract_inference_diagnostics_data(ty.into(), None);
-
- NeedTypeInfoInGenerator {
- bad_label: data.make_bad_error(span),
- span,
- generator_kind: GeneratorKindAsDiagArg(kind),
- }
- .into_diagnostic(&self.tcx.sess.parse_sess.span_diagnostic)
- }
-}
-
-pub struct GeneratorKindAsDiagArg(pub hir::GeneratorKind);
-
-impl IntoDiagnosticArg for GeneratorKindAsDiagArg {
- fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
- let kind = match self.0 {
- hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) => "async_block",
- hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure) => "async_closure",
- hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn) => "async_fn",
- hir::GeneratorKind::Gen => "generator",
- };
- rustc_errors::DiagnosticArgValue::Str(kind.into())
- }
-}
-
#[derive(Debug)]
struct InferSource<'tcx> {
span: Span,
@@ -951,7 +918,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
//
// See the `need_type_info/issue-103053.rs` test for
// a example.
- if !matches!(path.res, Res::Def(DefKind::TyAlias { .. }, _)) => {
+ if !matches!(path.res, Res::Def(DefKind::TyAlias, _)) => {
if let Some(ty) = self.opt_node_type(expr.hir_id)
&& let ty::Adt(_, args) = ty.kind()
{
@@ -1080,7 +1047,7 @@ impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
) => {
if tcx.res_generics_def_id(path.res) != Some(def.did()) {
match path.res {
- Res::Def(DefKind::TyAlias { .. }, _) => {
+ Res::Def(DefKind::TyAlias, _) => {
// FIXME: Ideally we should support this. For that
// we have to map back from the self type to the
// type alias though. That's difficult.
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
index 07f04ec1e..1b43022f8 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
@@ -61,7 +61,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
let is_impl_item = region_info.is_impl_item;
match br {
- ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(..) => {}
+ ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon => {}
_ => {
/* not an anonymous region */
debug!("try_report_named_anon_conflict: not an anonymous region");
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
index f903f7a49..4aec28b05 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
@@ -385,7 +385,7 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
let highlight_trait_ref = |trait_ref| Highlighted {
tcx: self.tcx(),
- highlight: RegionHighlightMode::new(self.tcx()),
+ highlight: RegionHighlightMode::default(),
value: trait_ref,
};
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_relation.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_relation.rs
index 8a78a1956..f5b891253 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_relation.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_relation.rs
@@ -36,15 +36,13 @@ impl<'tcx> NiceRegionError<'_, 'tcx> {
ty::BrNamed(def_id, symbol) => {
(Some(self.tcx().def_span(def_id)), Some(symbol))
}
- ty::BrAnon(span) => (*span, None),
- ty::BrEnv => (None, None),
+ ty::BrAnon | ty::BrEnv => (None, None),
};
let (sup_span, sup_symbol) = match sup_name {
ty::BrNamed(def_id, symbol) => {
(Some(self.tcx().def_span(def_id)), Some(symbol))
}
- ty::BrAnon(span) => (*span, None),
- ty::BrEnv => (None, None),
+ ty::BrAnon | ty::BrEnv => (None, None),
};
let diag = match (sub_span, sup_span, sub_symbol, sup_symbol) {
(Some(sub_span), Some(sup_span), Some(&sub_symbol), Some(&sup_symbol)) => {
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
index 12d38ced0..cb51254a1 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
@@ -35,14 +35,14 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
&& let (Subtype(sup_trace), Subtype(sub_trace)) = (&sup_origin, &sub_origin)
&& let CompareImplItemObligation { trait_item_def_id, .. } = sub_trace.cause.code()
&& sub_trace.values == sup_trace.values
- && let ValuePairs::Sigs(ExpectedFound { expected, found }) = sub_trace.values
+ && let ValuePairs::PolySigs(ExpectedFound { expected, found }) = sub_trace.values
{
// FIXME(compiler-errors): Don't like that this needs `Ty`s, but
// all of the region highlighting machinery only deals with those.
let guar = self.emit_err(
var_origin.span(),
- Ty::new_fn_ptr(self.cx.tcx,ty::Binder::dummy(expected)),
- Ty::new_fn_ptr(self.cx.tcx,ty::Binder::dummy(found)),
+ Ty::new_fn_ptr(self.cx.tcx, expected),
+ Ty::new_fn_ptr(self.cx.tcx, found),
*trait_item_def_id,
);
return Some(guar);
@@ -67,9 +67,9 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
}
impl<'tcx> HighlightBuilder<'tcx> {
- fn build(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> RegionHighlightMode<'tcx> {
+ fn build(ty: Ty<'tcx>) -> RegionHighlightMode<'tcx> {
let mut builder =
- HighlightBuilder { highlight: RegionHighlightMode::new(tcx), counter: 1 };
+ HighlightBuilder { highlight: RegionHighlightMode::default(), counter: 1 };
builder.visit_ty(ty);
builder.highlight
}
@@ -85,12 +85,12 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
}
}
- let expected_highlight = HighlightBuilder::build(self.tcx(), expected);
+ let expected_highlight = HighlightBuilder::build(expected);
let expected = self
.cx
.extract_inference_diagnostics_data(expected.into(), Some(expected_highlight))
.name;
- let found_highlight = HighlightBuilder::build(self.tcx(), found);
+ let found_highlight = HighlightBuilder::build(found);
let found =
self.cx.extract_inference_diagnostics_data(found.into(), Some(found_highlight)).name;
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
index 372539d73..5c3beee28 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
@@ -266,12 +266,10 @@ impl<T> Trait<T> for X {
}
}
}
- (ty::FnPtr(_), ty::FnDef(def, _))
- if let hir::def::DefKind::Fn = tcx.def_kind(def) => {
- diag.note(
- "when the arguments and return types match, functions can be coerced \
- to function pointers",
- );
+ (ty::FnPtr(sig), ty::FnDef(def_id, _)) | (ty::FnDef(def_id, _), ty::FnPtr(sig)) => {
+ if tcx.fn_sig(*def_id).skip_binder().unsafety() < sig.unsafety() {
+ diag.note("unsafe functions cannot be coerced into safe function pointers");
+ }
}
_ => {}
}
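
The replacement arm above drops the generic "functions can be coerced to function pointers" note and instead emits a note only when an `unsafe fn` item is matched against a safe `fn` pointer type. A user-level example of that case:

    // An `unsafe fn` item coerces to an `unsafe fn()` pointer but not to a safe `fn()`.
    unsafe fn poke() {}

    fn main() {
        let ok: unsafe fn() = poke; // unsafety matches
        // let bad: fn() = poke;
        // ^ error[E0308]: mismatched types
        //   note: unsafe functions cannot be coerced into safe function pointers
        unsafe { ok() }
    }
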
@@ -618,9 +616,13 @@ fn foo(&self) -> Self::T { String::new() }
for item in &items[..] {
if let hir::AssocItemKind::Type = item.kind {
let assoc_ty = tcx.type_of(item.id.owner_id).instantiate_identity();
-
- if self.infcx.can_eq(param_env, assoc_ty, found) {
- diag.span_label(item.span, "expected this associated type");
+ if let hir::Defaultness::Default { has_value: true } = tcx.defaultness(item.id.owner_id)
+ && self.infcx.can_eq(param_env, assoc_ty, found)
+ {
+ diag.span_label(
+ item.span,
+ "associated type is `default` and may be overridden",
+ );
return true;
}
}
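
The tightened check above labels an associated type only when it is declared `default` and can therefore be overridden by a more specific impl. A nightly-only sketch of the situation it targets; the trait and impl here are made up for illustration.

    // Sketch (requires the incomplete `specialization` feature): the body of a
    // specializable impl cannot assume the concrete value of its `default type`.
    #![feature(specialization)]
    #![allow(incomplete_features)]

    trait Trait {
        type T;
        fn foo(&self) -> Self::T;
    }

    impl<X> Trait for X {
        default type T = String;
        default fn foo(&self) -> Self::T {
            // error[E0308]: mismatched types; a more specific impl may override `T`,
            // and the label now explains that the associated type is `default`.
            String::new()
        }
    }
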
diff --git a/compiler/rustc_infer/src/infer/free_regions.rs b/compiler/rustc_infer/src/infer/free_regions.rs
index 2402a7ea7..ed1a2a117 100644
--- a/compiler/rustc_infer/src/infer/free_regions.rs
+++ b/compiler/rustc_infer/src/infer/free_regions.rs
@@ -4,7 +4,7 @@
//! and use that to decide when one free region outlives another, and so forth.
use rustc_data_structures::transitive_relation::TransitiveRelation;
-use rustc_middle::ty::{Lift, Region, TyCtxt};
+use rustc_middle::ty::{Region, TyCtxt};
/// Combines a `FreeRegionMap` and a `TyCtxt`.
///
@@ -101,10 +101,3 @@ impl<'tcx> FreeRegionMap<'tcx> {
result
}
}
-
-impl<'a, 'tcx> Lift<'tcx> for FreeRegionMap<'a> {
- type Lifted = FreeRegionMap<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<FreeRegionMap<'tcx>> {
- self.relation.maybe_map(|fr| tcx.lift(fr)).map(|relation| FreeRegionMap { relation })
- }
-}
diff --git a/compiler/rustc_infer/src/infer/freshen.rs b/compiler/rustc_infer/src/infer/freshen.rs
index 689945d64..0596ce373 100644
--- a/compiler/rustc_infer/src/infer/freshen.rs
+++ b/compiler/rustc_infer/src/infer/freshen.rs
@@ -156,6 +156,21 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for TypeFreshener<'a, 'tcx> {
.known();
self.freshen_const(opt_ct, ty::InferConst::Var(v), ty::InferConst::Fresh, ct.ty())
}
+ ty::ConstKind::Infer(ty::InferConst::EffectVar(v)) => {
+ let opt_ct = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .effect_unification_table()
+ .probe_value(v)
+ .map(|effect| effect.as_const(self.infcx.tcx));
+ self.freshen_const(
+ opt_ct,
+ ty::InferConst::EffectVar(v),
+ ty::InferConst::Fresh,
+ ct.ty(),
+ )
+ }
ty::ConstKind::Infer(ty::InferConst::Fresh(i)) => {
if i >= self.const_freshen_count {
bug!(
diff --git a/compiler/rustc_infer/src/infer/generalize.rs b/compiler/rustc_infer/src/infer/generalize.rs
index cf674d5dd..dd7f8d354 100644
--- a/compiler/rustc_infer/src/infer/generalize.rs
+++ b/compiler/rustc_infer/src/infer/generalize.rs
@@ -403,6 +403,7 @@ where
}
}
}
+ ty::ConstKind::Infer(InferConst::EffectVar(_)) => Ok(c),
// FIXME: remove this branch once `structurally_relate_consts` is fully
// structural.
ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args }) => {
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
index 60d9d6578..cb6513639 100644
--- a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
@@ -15,7 +15,6 @@ use rustc_data_structures::graph::implementation::{
use rustc_data_structures::intern::Interned;
use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::ty::fold::TypeFoldable;
-use rustc_middle::ty::PlaceholderRegion;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{ReEarlyBound, ReErased, ReError, ReFree, ReStatic};
use rustc_middle::ty::{ReLateBound, RePlaceholder, ReVar};
@@ -173,38 +172,6 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
}
}
- /// Gets the LUb of a given region and the empty region
- fn lub_empty(&self, a_region: Region<'tcx>) -> Result<Region<'tcx>, PlaceholderRegion> {
- match *a_region {
- ReLateBound(..) | ReErased => {
- bug!("cannot relate region: {:?}", a_region);
- }
-
- ReVar(v_id) => {
- span_bug!(
- self.var_infos[v_id].origin.span(),
- "lub invoked with non-concrete regions: {:?}",
- a_region,
- );
- }
-
- ReStatic => {
- // nothing lives longer than `'static`
- Ok(self.tcx().lifetimes.re_static)
- }
-
- ReError(_) => Ok(a_region),
-
- ReEarlyBound(_) | ReFree(_) => {
- // All empty regions are less than early-bound, free,
- // and scope regions.
- Ok(a_region)
- }
-
- RePlaceholder(placeholder) => Err(placeholder),
- }
- }
-
fn expansion(&self, var_values: &mut LexicalRegionResolutions<'tcx>) {
// In the first pass, we expand region vids according to constraints we
// have previously found. In the second pass, we loop through the region
@@ -247,27 +214,25 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
true
}
VarValue::Value(cur_region) => {
- let lub = match self.lub_empty(cur_region) {
- Ok(r) => r,
- // If the empty and placeholder regions are in the same universe,
- // then the LUB is the Placeholder region (which is the cur_region).
- // If they are not in the same universe, the LUB is the Static lifetime.
- Err(placeholder) if a_universe == placeholder.universe => {
- cur_region
+ match *cur_region {
+ // If this empty region is from a universe that can name the
+ // placeholder universe, then the LUB is the Placeholder region
+ // (which is the cur_region). Otherwise, the LUB is the Static
+ // lifetime.
+ RePlaceholder(placeholder)
+ if !a_universe.can_name(placeholder.universe) =>
+ {
+ let lub = self.tcx().lifetimes.re_static;
+ debug!(
+ "Expanding value of {:?} from {:?} to {:?}",
+ b_vid, cur_region, lub
+ );
+
+ *b_data = VarValue::Value(lub);
+ true
}
- Err(_) => self.tcx().lifetimes.re_static,
- };
-
- if lub == cur_region {
- false
- } else {
- debug!(
- "Expanding value of {:?} from {:?} to {:?}",
- b_vid, cur_region, lub
- );
-
- *b_data = VarValue::Value(lub);
- true
+
+ _ => false,
}
}
@@ -341,15 +306,19 @@ impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
match *b_data {
VarValue::Empty(empty_ui) => {
- let lub = match self.lub_empty(a_region) {
- Ok(r) => r,
- // If this empty region is from a universe that can
- // name the placeholder, then the placeholder is
- // larger; otherwise, the only ancestor is `'static`.
- Err(placeholder) if empty_ui.can_name(placeholder.universe) => {
- ty::Region::new_placeholder(self.tcx(), placeholder)
+ let lub = match *a_region {
+ RePlaceholder(placeholder) => {
+ // If this empty region is from a universe that can
+ // name the placeholder, then the placeholder is
+ // larger; otherwise, the only ancestor is `'static`.
+ if empty_ui.can_name(placeholder.universe) {
+ ty::Region::new_placeholder(self.tcx(), placeholder)
+ } else {
+ self.tcx().lifetimes.re_static
+ }
}
- Err(_) => self.tcx().lifetimes.re_static,
+
+ _ => a_region,
};
debug!("Expanding value of {:?} from empty lifetime to {:?}", b_vid, lub);
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
index aaabf1482..aeb3177af 100644
--- a/compiler/rustc_infer/src/infer/mod.rs
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -21,7 +21,7 @@ use rustc_data_structures::unify as ut;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues};
-use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
+use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue, EffectVarValue};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType};
use rustc_middle::mir::interpret::{ErrorHandled, EvalToValTreeResult};
use rustc_middle::mir::ConstraintCategory;
@@ -33,13 +33,14 @@ use rustc_middle::ty::relate::RelateResult;
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
pub use rustc_middle::ty::IntVarValue;
use rustc_middle::ty::{self, GenericParamDefKind, InferConst, InferTy, Ty, TyCtxt};
-use rustc_middle::ty::{ConstVid, FloatVid, IntVid, TyVid};
+use rustc_middle::ty::{ConstVid, EffectVid, FloatVid, IntVid, TyVid};
use rustc_middle::ty::{GenericArg, GenericArgKind, GenericArgs, GenericArgsRef};
use rustc_span::symbol::Symbol;
use rustc_span::Span;
use std::cell::{Cell, RefCell};
use std::fmt;
+use std::marker::PhantomData;
use self::combine::CombineFields;
use self::error_reporting::TypeErrCtxt;
@@ -115,6 +116,9 @@ pub struct InferCtxtInner<'tcx> {
/// Map from floating variable to the kind of float it represents.
float_unification_storage: ut::UnificationTableStorage<ty::FloatVid>,
+ /// Map from effect variable to the effect param it represents.
+ effect_unification_storage: ut::UnificationTableStorage<ty::EffectVid<'tcx>>,
+
/// Tracks the set of region variables and the constraints between them.
///
/// This is initially `Some(_)` but when
@@ -172,6 +176,7 @@ impl<'tcx> InferCtxtInner<'tcx> {
const_unification_storage: ut::UnificationTableStorage::new(),
int_unification_storage: ut::UnificationTableStorage::new(),
float_unification_storage: ut::UnificationTableStorage::new(),
+ effect_unification_storage: ut::UnificationTableStorage::new(),
region_constraint_storage: Some(RegionConstraintStorage::new()),
region_obligations: vec![],
opaque_type_storage: Default::default(),
@@ -223,6 +228,10 @@ impl<'tcx> InferCtxtInner<'tcx> {
self.const_unification_storage.with_log(&mut self.undo_log)
}
+ fn effect_unification_table(&mut self) -> UnificationTable<'_, 'tcx, ty::EffectVid<'tcx>> {
+ self.effect_unification_storage.with_log(&mut self.undo_log)
+ }
+
#[inline]
pub fn unwrap_region_constraints(&mut self) -> RegionConstraintCollector<'_, 'tcx> {
self.region_constraint_storage
@@ -356,6 +365,7 @@ impl<'tcx> ty::InferCtxtLike<TyCtxt<'tcx>> for InferCtxt<'tcx> {
Err(universe) => Some(universe),
Ok(_) => None,
},
+ EffectVar(_) => None,
Fresh(_) => None,
}
}
@@ -373,7 +383,7 @@ pub enum ValuePairs<'tcx> {
Aliases(ExpectedFound<ty::AliasTy<'tcx>>),
TraitRefs(ExpectedFound<ty::TraitRef<'tcx>>),
PolyTraitRefs(ExpectedFound<ty::PolyTraitRef<'tcx>>),
- Sigs(ExpectedFound<ty::FnSig<'tcx>>),
+ PolySigs(ExpectedFound<ty::PolyFnSig<'tcx>>),
ExistentialTraitRef(ExpectedFound<ty::PolyExistentialTraitRef<'tcx>>),
ExistentialProjection(ExpectedFound<ty::PolyExistentialProjection<'tcx>>),
}
@@ -764,19 +774,32 @@ impl<'tcx> InferCtxt<'tcx> {
.collect();
vars.extend(
(0..inner.int_unification_table().len())
- .map(|i| ty::IntVid { index: i as u32 })
+ .map(|i| ty::IntVid::from_u32(i as u32))
.filter(|&vid| inner.int_unification_table().probe_value(vid).is_none())
.map(|v| Ty::new_int_var(self.tcx, v)),
);
vars.extend(
(0..inner.float_unification_table().len())
- .map(|i| ty::FloatVid { index: i as u32 })
+ .map(|i| ty::FloatVid::from_u32(i as u32))
.filter(|&vid| inner.float_unification_table().probe_value(vid).is_none())
.map(|v| Ty::new_float_var(self.tcx, v)),
);
vars
}
+ pub fn unsolved_effects(&self) -> Vec<ty::Const<'tcx>> {
+ let mut inner = self.inner.borrow_mut();
+ let mut table = inner.effect_unification_table();
+
+ (0..table.len())
+ .map(|i| ty::EffectVid { index: i as u32, phantom: PhantomData })
+ .filter(|&vid| table.probe_value(vid).is_none())
+ .map(|v| {
+ ty::Const::new_infer(self.tcx, ty::InferConst::EffectVar(v), self.tcx.types.bool)
+ })
+ .collect()
+ }
+
fn combine_fields<'a>(
&'a self,
trace: TypeTrace<'tcx>,
@@ -1158,7 +1181,10 @@ impl<'tcx> InferCtxt<'tcx> {
Ty::new_var(self.tcx, ty_var_id).into()
}
- GenericParamDefKind::Const { .. } => {
+ GenericParamDefKind::Const { is_host_effect, .. } => {
+ if is_host_effect {
+ return self.var_for_effect(param);
+ }
let origin = ConstVariableOrigin {
kind: ConstVariableOriginKind::ConstParameterDefinition(
param.name,
@@ -1184,6 +1210,17 @@ impl<'tcx> InferCtxt<'tcx> {
}
}
+ pub fn var_for_effect(&self, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ let effect_vid = self.inner.borrow_mut().effect_unification_table().new_key(None);
+ let ty = self
+ .tcx
+ .type_of(param.def_id)
+ .no_bound_vars()
+ .expect("const parameter types cannot be generic");
+ debug_assert_eq!(self.tcx.types.bool, ty);
+ ty::Const::new_infer(self.tcx, ty::InferConst::EffectVar(effect_vid), ty).into()
+ }
+
/// Given a set of generics defined on a type or impl, returns a substitution mapping each
/// type/region parameter to a fresh inference variable.
pub fn fresh_args_for_item(&self, span: Span, def_id: DefId) -> GenericArgsRef<'tcx> {
@@ -1298,6 +1335,10 @@ impl<'tcx> InferCtxt<'tcx> {
self.inner.borrow_mut().const_unification_table().find(var)
}
+ pub fn root_effect_var(&self, var: ty::EffectVid<'tcx>) -> ty::EffectVid<'tcx> {
+ self.inner.borrow_mut().effect_unification_table().find(var)
+ }
+
/// Resolves an int var to a rigid int type, if it was constrained to one,
/// or else the root int var in the unification table.
pub fn opportunistic_resolve_int_var(&self, vid: ty::IntVid) -> Ty<'tcx> {
@@ -1369,6 +1410,10 @@ impl<'tcx> InferCtxt<'tcx> {
}
}
+ pub fn probe_effect_var(&self, vid: EffectVid<'tcx>) -> Option<EffectVarValue<'tcx>> {
+ self.inner.borrow_mut().effect_unification_table().probe_value(vid)
+ }
+
/// Attempts to resolve all type/region/const variables in
/// `value`. Region inference must have been run already (e.g.,
/// by calling `resolve_regions_and_report_errors`). If some
@@ -1555,9 +1600,12 @@ impl<'tcx> InferCtxt<'tcx> {
if let Some(ct) = tcx.thir_abstract_const(unevaluated.def)? {
let ct = tcx.expand_abstract_consts(ct.instantiate(tcx, args));
if let Err(e) = ct.error_reported() {
- return Err(ErrorHandled::Reported(e.into()));
+ return Err(ErrorHandled::Reported(
+ e.into(),
+ span.unwrap_or(rustc_span::DUMMY_SP),
+ ));
} else if ct.has_non_region_infer() || ct.has_non_region_param() {
- return Err(ErrorHandled::TooGeneric);
+ return Err(ErrorHandled::TooGeneric(span.unwrap_or(rustc_span::DUMMY_SP)));
} else {
args = replace_param_and_infer_args_with_placeholder(tcx, args);
}
@@ -1649,6 +1697,14 @@ impl<'tcx> InferCtxt<'tcx> {
ConstVariableValue::Known { .. } => true,
}
}
+
+ TyOrConstInferVar::Effect(v) => {
+ // If `probe_value` returns `Some`, it never equals
+                // `ty::ConstKind::Infer(ty::InferConst::EffectVar(v))`.
+ //
+ // Not `inlined_probe_value(v)` because this call site is colder.
+ self.probe_effect_var(v).is_some()
+ }
}
}
}
@@ -1720,6 +1776,8 @@ pub enum TyOrConstInferVar<'tcx> {
/// Equivalent to `ty::ConstKind::Infer(ty::InferConst::Var(_))`.
Const(ConstVid<'tcx>),
+ /// Equivalent to `ty::ConstKind::Infer(ty::InferConst::EffectVar(_))`.
+ Effect(EffectVid<'tcx>),
}
impl<'tcx> TyOrConstInferVar<'tcx> {
@@ -1750,6 +1808,7 @@ impl<'tcx> TyOrConstInferVar<'tcx> {
fn maybe_from_const(ct: ty::Const<'tcx>) -> Option<Self> {
match ct.kind() {
ty::ConstKind::Infer(InferConst::Var(v)) => Some(TyOrConstInferVar::Const(v)),
+ ty::ConstKind::Infer(InferConst::EffectVar(v)) => Some(TyOrConstInferVar::Effect(v)),
_ => None,
}
}
@@ -1793,17 +1852,24 @@ impl<'a, 'tcx> TypeFolder<TyCtxt<'tcx>> for ShallowResolver<'a, 'tcx> {
}
fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
- if let ty::ConstKind::Infer(InferConst::Var(vid)) = ct.kind() {
- self.infcx
+ match ct.kind() {
+ ty::ConstKind::Infer(InferConst::Var(vid)) => self
+ .infcx
.inner
.borrow_mut()
.const_unification_table()
.probe_value(vid)
.val
.known()
- .unwrap_or(ct)
- } else {
- ct
+ .unwrap_or(ct),
+ ty::ConstKind::Infer(InferConst::EffectVar(vid)) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .effect_unification_table()
+ .probe_value(vid)
+ .map_or(ct, |val| val.as_const(self.infcx.tcx)),
+ _ => ct,
}
}
}
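The `rustc_infer` changes above thread a new kind of inference variable (effect variables) through the context: a dedicated unification table in `InferCtxtInner`, `probe_effect_var`, and an `unsolved_effects` walk over keys whose value is still `None`. A rough std-only sketch of that shape, with toy types rather than rustc's real `UnificationTableStorage`:

```
use std::marker::PhantomData; // mirrors the PhantomData carried by ty::EffectVid<'tcx>

// Toy stand-in: an "effect variable" is an index into a table whose entries
// start out unknown (None) and may later be assigned a value.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct EffectVid<'tcx> {
    index: u32,
    phantom: PhantomData<&'tcx ()>,
}

#[derive(Default)]
struct EffectTable<'tcx> {
    values: Vec<Option<bool>>, // host-effect values are booleans in this sketch
    _marker: PhantomData<&'tcx ()>,
}

impl<'tcx> EffectTable<'tcx> {
    fn new_key(&mut self) -> EffectVid<'tcx> {
        self.values.push(None);
        EffectVid { index: (self.values.len() - 1) as u32, phantom: PhantomData }
    }

    fn probe_value(&self, vid: EffectVid<'tcx>) -> Option<bool> {
        self.values[vid.index as usize]
    }

    /// Collect the variables that are still unsolved, like `unsolved_effects` above.
    fn unsolved(&self) -> Vec<EffectVid<'tcx>> {
        (0..self.values.len() as u32)
            .map(|index| EffectVid { index, phantom: PhantomData })
            .filter(|&vid| self.probe_value(vid).is_none())
            .collect()
    }
}

fn main() {
    let mut table = EffectTable::default();
    let v0 = table.new_key();
    let _v1 = table.new_key();
    table.values[v0.index as usize] = Some(true);
    assert_eq!(table.unsolved().len(), 1);
}
```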
diff --git a/compiler/rustc_infer/src/infer/outlives/components.rs b/compiler/rustc_infer/src/infer/outlives/components.rs
index 2ac9568f6..6a9d40daa 100644
--- a/compiler/rustc_infer/src/infer/outlives/components.rs
+++ b/compiler/rustc_infer/src/infer/outlives/components.rs
@@ -112,7 +112,7 @@ fn compute_components<'tcx>(
}
// All regions are bound inside a witness
- ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => (),
+ ty::GeneratorWitness(..) => (),
// OutlivesTypeParameterEnv -- the actual checking that `X:'a`
// is implied by the environment is done in regionck.
diff --git a/compiler/rustc_infer/src/infer/sub.rs b/compiler/rustc_infer/src/infer/sub.rs
index 27e1ed56f..0c3bb633b 100644
--- a/compiler/rustc_infer/src/infer/sub.rs
+++ b/compiler/rustc_infer/src/infer/sub.rs
@@ -147,25 +147,6 @@ impl<'tcx> TypeRelation<'tcx> for Sub<'_, '_, 'tcx> {
);
Ok(a)
}
- // Optimization of GeneratorWitness relation since we know that all
- // free regions are replaced with bound regions during construction.
- // This greatly speeds up subtyping of GeneratorWitness.
- (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
- let a_types = infcx.tcx.anonymize_bound_vars(a_types);
- let b_types = infcx.tcx.anonymize_bound_vars(b_types);
- if a_types.bound_vars() == b_types.bound_vars() {
- let (a_types, b_types) = infcx.instantiate_binder_with_placeholders(
- a_types.map_bound(|a_types| (a_types, b_types.skip_binder())),
- );
- for (a, b) in std::iter::zip(a_types, b_types) {
- self.relate(a, b)?;
- }
- Ok(a)
- } else {
- Err(ty::error::TypeError::Sorts(ty::relate::expected_found(self, a, b)))
- }
- }
-
_ => {
self.fields.infcx.super_combine_tys(self, a, b)?;
Ok(a)
diff --git a/compiler/rustc_infer/src/infer/undo_log.rs b/compiler/rustc_infer/src/infer/undo_log.rs
index 25d06b21e..79144b3e6 100644
--- a/compiler/rustc_infer/src/infer/undo_log.rs
+++ b/compiler/rustc_infer/src/infer/undo_log.rs
@@ -24,6 +24,7 @@ pub(crate) enum UndoLog<'tcx> {
ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
+ EffectUnificationTable(sv::UndoLog<ut::Delegate<ty::EffectVid<'tcx>>>),
RegionConstraintCollector(region_constraints::UndoLog<'tcx>),
RegionUnificationTable(sv::UndoLog<ut::Delegate<RegionVidKey<'tcx>>>),
ProjectionCache(traits::UndoLog<'tcx>),
@@ -55,6 +56,7 @@ impl_from! {
IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
+ EffectUnificationTable(sv::UndoLog<ut::Delegate<ty::EffectVid<'tcx>>>),
ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
@@ -71,6 +73,7 @@ impl<'tcx> Rollback<UndoLog<'tcx>> for InferCtxtInner<'tcx> {
UndoLog::ConstUnificationTable(undo) => self.const_unification_storage.reverse(undo),
UndoLog::IntUnificationTable(undo) => self.int_unification_storage.reverse(undo),
UndoLog::FloatUnificationTable(undo) => self.float_unification_storage.reverse(undo),
+ UndoLog::EffectUnificationTable(undo) => self.effect_unification_storage.reverse(undo),
UndoLog::RegionConstraintCollector(undo) => {
self.region_constraint_storage.as_mut().unwrap().reverse(undo)
}
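The undo-log hunks above add `EffectUnificationTable` as one more entry kind that can be reversed on rollback. A small self-contained sketch of the general pattern, with invented entry kinds:

```
// Every mutation records an entry; a snapshot can be rolled back by reversing
// entries in LIFO order.
enum UndoLog {
    PushedValue,                       // a value was appended to `values`
    AssignedValue(usize, Option<i32>), // (index, previous contents)
}

#[derive(Default)]
struct Storage {
    values: Vec<Option<i32>>,
    undo_log: Vec<UndoLog>,
}

impl Storage {
    fn new_key(&mut self) -> usize {
        self.values.push(None);
        self.undo_log.push(UndoLog::PushedValue);
        self.values.len() - 1
    }

    fn assign(&mut self, key: usize, value: i32) {
        self.undo_log.push(UndoLog::AssignedValue(key, self.values[key]));
        self.values[key] = Some(value);
    }

    fn snapshot(&self) -> usize {
        self.undo_log.len()
    }

    fn rollback_to(&mut self, snapshot: usize) {
        while self.undo_log.len() > snapshot {
            match self.undo_log.pop().unwrap() {
                UndoLog::PushedValue => {
                    self.values.pop();
                }
                UndoLog::AssignedValue(key, previous) => self.values[key] = previous,
            }
        }
    }
}

fn main() {
    let mut storage = Storage::default();
    let k = storage.new_key();
    let snap = storage.snapshot();
    storage.assign(k, 7);
    storage.rollback_to(snap);
    assert_eq!(storage.values[k], None);
}
```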
diff --git a/compiler/rustc_interface/src/callbacks.rs b/compiler/rustc_interface/src/callbacks.rs
index bc6d7c209..45b1aeb4a 100644
--- a/compiler/rustc_interface/src/callbacks.rs
+++ b/compiler/rustc_interface/src/callbacks.rs
@@ -10,8 +10,10 @@
//! origin crate when the `TyCtxt` is not present in TLS.
use rustc_errors::{Diagnostic, TRACK_DIAGNOSTICS};
-use rustc_middle::dep_graph::TaskDepsRef;
+use rustc_middle::dep_graph::{DepNodeExt, TaskDepsRef};
use rustc_middle::ty::tls;
+use rustc_query_system::dep_graph::dep_node::default_dep_kind_debug;
+use rustc_query_system::dep_graph::{DepContext, DepKind, DepNode};
use std::fmt;
fn track_span_parent(def_id: rustc_span::def_id::LocalDefId) {
@@ -59,10 +61,49 @@ fn def_id_debug(def_id: rustc_hir::def_id::DefId, f: &mut fmt::Formatter<'_>) ->
write!(f, ")")
}
+/// This is a callback from `rustc_query_system` as it cannot access the implicit state
+/// in `rustc_middle` otherwise.
+pub fn dep_kind_debug(kind: DepKind, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ write!(f, "{}", tcx.dep_kind_info(kind).name)
+ } else {
+ default_dep_kind_debug(kind, f)
+ }
+ })
+}
+
+/// This is a callback from `rustc_query_system` as it cannot access the implicit state
+/// in `rustc_middle` otherwise.
+pub fn dep_node_debug(node: DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:?}(", node.kind)?;
+
+ tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ if let Some(def_id) = node.extract_def_id(tcx) {
+ write!(f, "{}", tcx.def_path_debug_str(def_id))?;
+ } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(node) {
+ write!(f, "{s}")?;
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ Ok(())
+ })?;
+
+ write!(f, ")")
+}
+
/// Sets up the callbacks in prior crates which we want to refer to the
/// TyCtxt in.
pub fn setup_callbacks() {
rustc_span::SPAN_TRACK.swap(&(track_span_parent as fn(_)));
rustc_hir::def_id::DEF_ID_DEBUG.swap(&(def_id_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+ rustc_query_system::dep_graph::dep_node::DEP_KIND_DEBUG
+ .swap(&(dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+ rustc_query_system::dep_graph::dep_node::DEP_NODE_DEBUG
+ .swap(&(dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
TRACK_DIAGNOSTICS.swap(&(track_diagnostic as _));
}
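`dep_kind_debug` and `dep_node_debug` above follow the same pattern as the existing `def_id_debug`: the low-level crate formats through a swappable callback with a default fallback, and `setup_callbacks` installs a context-aware formatter from the higher-level crate. A hedged std-only analogue using `OnceLock` instead of rustc's atomic-swap machinery:

```
use std::fmt;
use std::sync::OnceLock;

#[derive(Clone, Copy)]
struct NodeId(u64);

type DebugFn = fn(NodeId, &mut fmt::Formatter<'_>) -> fmt::Result;

static NODE_DEBUG: OnceLock<DebugFn> = OnceLock::new();

fn default_node_debug(node: NodeId, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "Node({})", node.0)
}

impl fmt::Debug for NodeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Use the installed callback if one exists, otherwise the fallback.
        NODE_DEBUG.get().copied().unwrap_or(default_node_debug)(*self, f)
    }
}

// "Higher-level crate" side: a formatter with more context (hypothetical).
fn rich_node_debug(node: NodeId, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "Node({}, path = crate::some::item)", node.0)
}

fn setup_callbacks() {
    let _ = NODE_DEBUG.set(rich_node_debug);
}

fn main() {
    println!("{:?}", NodeId(3)); // falls back to the default
    setup_callbacks();
    println!("{:?}", NodeId(3)); // now uses the installed callback
}
```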
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
index 5b417e008..1c330c064 100644
--- a/compiler/rustc_interface/src/interface.rs
+++ b/compiler/rustc_interface/src/interface.rs
@@ -9,7 +9,7 @@ use rustc_data_structures::sync::Lrc;
use rustc_errors::registry::Registry;
use rustc_errors::{ErrorGuaranteed, Handler};
use rustc_lint::LintStore;
-use rustc_middle::query::{ExternProviders, Providers};
+use rustc_middle::util::Providers;
use rustc_middle::{bug, ty};
use rustc_parse::maybe_new_parser_from_source_str;
use rustc_query_impl::QueryCtxt;
@@ -37,7 +37,7 @@ pub struct Compiler {
pub(crate) sess: Lrc<Session>,
codegen_backend: Lrc<dyn CodegenBackend>,
pub(crate) register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
- pub(crate) override_queries: Option<fn(&Session, &mut Providers, &mut ExternProviders)>,
+ pub(crate) override_queries: Option<fn(&Session, &mut Providers)>,
}
impl Compiler {
@@ -271,7 +271,7 @@ pub struct Config {
/// the list of queries.
///
    /// The second parameter is the combined local and external providers.
- pub override_queries: Option<fn(&Session, &mut Providers, &mut ExternProviders)>,
+ pub override_queries: Option<fn(&Session, &mut Providers)>,
/// This is a callback from the driver that is called to create a codegen backend.
pub make_codegen_backend:
@@ -279,6 +279,12 @@ pub struct Config {
/// Registry of diagnostics codes.
pub registry: Registry,
+
+    /// All command-line args used to invoke the compiler, with @file args fully expanded.
+    /// This will only be used within debug info, e.g. in the pdb file on Windows.
+    /// This is mainly useful for other tools that read that debuginfo to figure out
+    /// how to call the compiler with the same arguments.
+ pub expanded_args: Vec<String>,
}
// JUSTIFICATION: before session exists, only config
@@ -317,6 +323,7 @@ pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Se
config.make_codegen_backend,
registry.clone(),
config.ice_file,
+ config.expanded_args,
);
if let Some(parse_sess_created) = config.parse_sess_created {
diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs
index 51bd8381e..76131c1ad 100644
--- a/compiler/rustc_interface/src/lib.rs
+++ b/compiler/rustc_interface/src/lib.rs
@@ -25,7 +25,7 @@ pub mod util;
pub use callbacks::setup_callbacks;
pub use interface::{run_compiler, Config};
-pub use passes::{DEFAULT_EXTERN_QUERY_PROVIDERS, DEFAULT_QUERY_PROVIDERS};
+pub use passes::DEFAULT_QUERY_PROVIDERS;
pub use queries::Queries;
#[cfg(test)]
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
index 18a669175..0e8f93cef 100644
--- a/compiler/rustc_interface/src/passes.rs
+++ b/compiler/rustc_interface/src/passes.rs
@@ -8,7 +8,7 @@ use rustc_borrowck as mir_borrowck;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::parallel;
use rustc_data_structures::steal::Steal;
-use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
+use rustc_data_structures::sync::{Lrc, OnceLock, WorkerLocal};
use rustc_errors::PResult;
use rustc_expand::base::{ExtCtxt, LintStoreExpand};
use rustc_feature::Features;
@@ -18,11 +18,11 @@ use rustc_lint::{unerased_lint_store, BufferedEarlyLint, EarlyCheckNode, LintSto
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
-use rustc_middle::query::{ExternProviders, Providers};
use rustc_middle::ty::{self, GlobalCtxt, RegisteredTools, TyCtxt};
+use rustc_middle::util::Providers;
use rustc_mir_build as mir_build;
use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str, validate_attr};
-use rustc_passes::{self, hir_stats, layout_test};
+use rustc_passes::{self, abi_test, hir_stats, layout_test};
use rustc_plugin_impl as plugin;
use rustc_resolve::Resolver;
use rustc_session::code_stats::VTableSizeInfo;
@@ -584,7 +584,7 @@ fn resolver_for_lowering<'tcx>(
let krate = configure_and_expand(krate, &pre_configured_attrs, &mut resolver);
// Make sure we don't mutate the cstore from here on.
- tcx.untracked().cstore.leak();
+ tcx.untracked().cstore.freeze();
let ty::ResolverOutputs {
global_ctxt: untracked_resolutions,
@@ -675,13 +675,6 @@ pub static DEFAULT_QUERY_PROVIDERS: LazyLock<Providers> = LazyLock::new(|| {
*providers
});
-pub static DEFAULT_EXTERN_QUERY_PROVIDERS: LazyLock<ExternProviders> = LazyLock::new(|| {
- let mut extern_providers = ExternProviders::default();
- rustc_metadata::provide_extern(&mut extern_providers);
- rustc_codegen_ssa::provide_extern(&mut extern_providers);
- extern_providers
-});
-
pub fn create_global_ctxt<'tcx>(
compiler: &'tcx Compiler,
crate_types: Vec<CrateType>,
@@ -689,7 +682,7 @@ pub fn create_global_ctxt<'tcx>(
lint_store: Lrc<LintStore>,
dep_graph: DepGraph,
untracked: Untracked,
- gcx_cell: &'tcx OnceCell<GlobalCtxt<'tcx>>,
+ gcx_cell: &'tcx OnceLock<GlobalCtxt<'tcx>>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
hir_arena: &'tcx WorkerLocal<rustc_hir::Arena<'tcx>>,
) -> &'tcx GlobalCtxt<'tcx> {
@@ -702,14 +695,11 @@ pub fn create_global_ctxt<'tcx>(
let query_result_on_disk_cache = rustc_incremental::load_query_result_cache(sess);
let codegen_backend = compiler.codegen_backend();
- let mut local_providers = *DEFAULT_QUERY_PROVIDERS;
- codegen_backend.provide(&mut local_providers);
-
- let mut extern_providers = *DEFAULT_EXTERN_QUERY_PROVIDERS;
- codegen_backend.provide_extern(&mut extern_providers);
+ let mut providers = *DEFAULT_QUERY_PROVIDERS;
+ codegen_backend.provide(&mut providers);
if let Some(callback) = compiler.override_queries {
- callback(sess, &mut local_providers, &mut extern_providers);
+ callback(sess, &mut providers);
}
let incremental = dep_graph.is_fully_enabled();
@@ -727,11 +717,12 @@ pub fn create_global_ctxt<'tcx>(
dep_graph,
rustc_query_impl::query_callbacks(arena),
rustc_query_impl::query_system(
- local_providers,
- extern_providers,
+ providers.queries,
+ providers.extern_queries,
query_result_on_disk_cache,
incremental,
),
+ providers.hooks,
)
})
})
@@ -743,12 +734,11 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
rustc_passes::hir_id_validator::check_crate(tcx);
let sess = tcx.sess;
- let mut entry_point = None;
sess.time("misc_checking_1", || {
parallel!(
{
- entry_point = sess.time("looking_for_entry_point", || tcx.entry_fn(()));
+ sess.time("looking_for_entry_point", || tcx.ensure().entry_fn(()));
sess.time("looking_for_derive_registrar", || {
tcx.ensure().proc_macro_decls_static(())
@@ -808,16 +798,15 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
}
});
- if tcx.sess.opts.unstable_opts.drop_tracking_mir {
- tcx.hir().par_body_owners(|def_id| {
- if let rustc_hir::def::DefKind::Generator = tcx.def_kind(def_id) {
- tcx.ensure().mir_generator_witnesses(def_id);
- tcx.ensure().check_generator_obligations(def_id);
- }
- });
- }
+ tcx.hir().par_body_owners(|def_id| {
+ if let rustc_hir::def::DefKind::Generator = tcx.def_kind(def_id) {
+ tcx.ensure().mir_generator_witnesses(def_id);
+ tcx.ensure().check_generator_obligations(def_id);
+ }
+ });
sess.time("layout_testing", || layout_test::test_layout(tcx));
+ sess.time("abi_testing", || abi_test::test_abi(tcx));
// Avoid overwhelming user with errors if borrow checking failed.
// I'm not sure how helpful this is, to be honest, but it avoids a
@@ -862,7 +851,7 @@ fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
// This check has to be run after all lints are done processing. We don't
// define a lint filter, as all lint checks should have finished at this point.
- sess.time("check_lint_expectations", || tcx.check_expectations(None));
+ sess.time("check_lint_expectations", || tcx.ensure().check_expectations(None));
});
if sess.opts.unstable_opts.print_vtable_sizes {
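After this change there is a single merged `Providers` value: the defaults are computed once, the codegen backend layers its providers on top, and `override_queries` (now taking only `&mut Providers`) runs last. A toy sketch of that layering with an invented two-field `Providers`, not the real `rustc_middle::util::Providers`:

```
use std::sync::OnceLock;

#[derive(Clone, Copy)]
struct Providers {
    type_of: fn(u32) -> &'static str,
    optimize: fn(u32) -> bool,
}

fn default_type_of(_id: u32) -> &'static str {
    "i32"
}
fn default_optimize(_id: u32) -> bool {
    false
}

fn default_providers() -> &'static Providers {
    static DEFAULTS: OnceLock<Providers> = OnceLock::new();
    DEFAULTS.get_or_init(|| Providers { type_of: default_type_of, optimize: default_optimize })
}

fn backend_provide(providers: &mut Providers) {
    providers.optimize = |_id| true; // the "codegen backend" layer
}

fn build_context(override_queries: Option<fn(&mut Providers)>) -> Providers {
    let mut providers = *default_providers();
    backend_provide(&mut providers);
    if let Some(callback) = override_queries {
        callback(&mut providers); // tool-supplied overrides win last
    }
    providers
}

fn main() {
    let providers = build_context(Some(|p: &mut Providers| p.type_of = |_id| "u64"));
    assert_eq!((providers.type_of)(0), "u64");
    assert!((providers.optimize)(0));
}
```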
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index fc71c6c7e..fe253febf 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -7,10 +7,10 @@ use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
-use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal};
+use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, Lrc, OnceLock, WorkerLocal};
use rustc_hir::def_id::{StableCrateId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
-use rustc_incremental::DepGraphFuture;
+use rustc_incremental::setup_dep_graph;
use rustc_metadata::creader::CStore;
use rustc_middle::arena::Arena;
use rustc_middle::dep_graph::DepGraph;
@@ -19,7 +19,6 @@ use rustc_session::config::{self, CrateType, OutputFilenames, OutputType};
use rustc_session::cstore::Untracked;
use rustc_session::{output::find_crate_name, Session};
use rustc_span::symbol::sym;
-use rustc_span::Symbol;
use std::any::Any;
use std::cell::{RefCell, RefMut};
use std::sync::Arc;
@@ -78,7 +77,7 @@ impl<T> Default for Query<T> {
pub struct Queries<'tcx> {
compiler: &'tcx Compiler,
- gcx_cell: OnceCell<GlobalCtxt<'tcx>>,
+ gcx_cell: OnceLock<GlobalCtxt<'tcx>>,
arena: WorkerLocal<Arena<'tcx>>,
hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>,
@@ -93,7 +92,7 @@ impl<'tcx> Queries<'tcx> {
pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> {
Queries {
compiler,
- gcx_cell: OnceCell::new(),
+ gcx_cell: OnceLock::new(),
arena: WorkerLocal::new(|_| Arena::default()),
hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()),
parse: Default::default(),
@@ -114,6 +113,7 @@ impl<'tcx> Queries<'tcx> {
.compute(|| passes::parse(self.session()).map_err(|mut parse_error| parse_error.emit()))
}
+ #[deprecated = "pre_configure may be made private in the future. If you need it please open an issue with your use case."]
pub fn pre_configure(&self) -> Result<QueryResult<'_, (ast::Crate, ast::AttrVec)>> {
self.pre_configure.compute(|| {
let mut krate = self.parse()?.steal();
@@ -131,46 +131,10 @@ impl<'tcx> Queries<'tcx> {
})
}
- fn dep_graph_future(
- &self,
- crate_name: Symbol,
- stable_crate_id: StableCrateId,
- ) -> Result<Option<DepGraphFuture>> {
- let sess = self.session();
-
- // `load_dep_graph` can only be called after `prepare_session_directory`.
- rustc_incremental::prepare_session_directory(sess, crate_name, stable_crate_id)?;
- let res = sess.opts.build_dep_graph().then(|| rustc_incremental::load_dep_graph(sess));
-
- if sess.opts.incremental.is_some() {
- sess.time("incr_comp_garbage_collect_session_directories", || {
- if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) {
- warn!(
- "Error while trying to garbage collect incremental \
- compilation cache directory: {}",
- e
- );
- }
- });
- }
-
- Ok(res)
- }
-
- fn dep_graph(&self, dep_graph_future: Option<DepGraphFuture>) -> DepGraph {
- dep_graph_future
- .and_then(|future| {
- let sess = self.session();
- let (prev_graph, prev_work_products) =
- sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
- rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
- })
- .unwrap_or_else(DepGraph::new_disabled)
- }
-
pub fn global_ctxt(&'tcx self) -> Result<QueryResult<'_, &'tcx GlobalCtxt<'tcx>>> {
self.gcx.compute(|| {
let sess = self.session();
+ #[allow(deprecated)]
let (krate, pre_configured_attrs) = self.pre_configure()?.steal();
// parse `#[crate_name]` even if `--crate-name` was passed, to make sure it matches.
@@ -182,10 +146,7 @@ impl<'tcx> Queries<'tcx> {
sess.opts.cg.metadata.clone(),
sess.cfg_version,
);
-
- // Compute the dependency graph (in the background). We want to do this as early as
- // possible, to give the DepGraph maximum time to load before `dep_graph` is called.
- let dep_graph_future = self.dep_graph_future(crate_name, stable_crate_id)?;
+ let dep_graph = setup_dep_graph(sess, crate_name, stable_crate_id)?;
let lint_store = Lrc::new(passes::create_lint_store(
sess,
@@ -193,11 +154,11 @@ impl<'tcx> Queries<'tcx> {
self.compiler.register_lints.as_deref(),
&pre_configured_attrs,
));
- let cstore = RwLock::new(Box::new(CStore::new(
+ let cstore = FreezeLock::new(Box::new(CStore::new(
self.codegen_backend().metadata_loader(),
stable_crate_id,
)) as _);
- let definitions = RwLock::new(Definitions::new(stable_crate_id));
+ let definitions = FreezeLock::new(Definitions::new(stable_crate_id));
let source_span = AppendOnlyIndexVec::new();
let _id = source_span.push(krate.spans.inner_span);
debug_assert_eq!(_id, CRATE_DEF_ID);
@@ -208,7 +169,7 @@ impl<'tcx> Queries<'tcx> {
crate_types,
stable_crate_id,
lint_store,
- self.dep_graph(dep_graph_future),
+ dep_graph,
untracked,
&self.gcx_cell,
&self.arena,
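`queries.rs` now stores the `CStore` and `Definitions` in `FreezeLock`s, matching the `cstore.freeze()` call in `passes.rs`: the data is mutable while the session is being set up and treated as read-only afterwards. A simplified std-only model of that idea (not the real `rustc_data_structures` type):

```
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::RwLock;

struct FreezeLock<T> {
    frozen: AtomicBool,
    data: RwLock<T>,
}

impl<T> FreezeLock<T> {
    fn new(value: T) -> Self {
        FreezeLock { frozen: AtomicBool::new(false), data: RwLock::new(value) }
    }

    fn write(&self, f: impl FnOnce(&mut T)) {
        assert!(!self.frozen.load(Ordering::Acquire), "cannot mutate after freeze");
        f(&mut self.data.write().unwrap());
    }

    fn freeze(&self) {
        self.frozen.store(true, Ordering::Release);
    }

    fn read<R>(&self, f: impl FnOnce(&T) -> R) -> R {
        // A real implementation could skip the lock once frozen; this sketch
        // always takes the read lock for simplicity.
        f(&self.data.read().unwrap())
    }
}

fn main() {
    let cstore = FreezeLock::new(Vec::<&str>::new());
    cstore.write(|crates| crates.push("core"));
    cstore.freeze();
    assert_eq!(cstore.read(|crates| crates.len()), 1);
}
```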
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
index e3d66d183..2510ce714 100644
--- a/compiler/rustc_interface/src/tests.rs
+++ b/compiler/rustc_interface/src/tests.rs
@@ -68,6 +68,7 @@ fn mk_session(handler: &mut EarlyErrorHandler, matches: getopts::Matches) -> (Se
None,
"",
None,
+ Default::default(),
);
(sess, cfg)
}
@@ -683,7 +684,6 @@ fn test_unstable_options_tracking_hash() {
untracked!(dep_tasks, true);
untracked!(dont_buffer_diagnostics, true);
untracked!(dump_dep_graph, true);
- untracked!(dump_drop_tracking_cfg, Some("cfg.dot".to_string()));
untracked!(dump_mir, Some(String::from("abc")));
untracked!(dump_mir_dataflow, true);
untracked!(dump_mir_dir, String::from("abc"));
@@ -703,7 +703,7 @@ fn test_unstable_options_tracking_hash() {
untracked!(keep_hygiene_data, true);
untracked!(link_native_libraries, false);
untracked!(llvm_time_trace, true);
- untracked!(ls, true);
+ untracked!(ls, vec!["all".to_owned()]);
untracked!(macro_backtrace, true);
untracked!(meta_stats, true);
untracked!(mir_include_spans, true);
@@ -772,7 +772,6 @@ fn test_unstable_options_tracking_hash() {
tracked!(debug_info_for_profiling, true);
tracked!(debug_macros, true);
tracked!(dep_info_omit_d_target, true);
- tracked!(drop_tracking, true);
tracked!(dual_proc_macros, true);
tracked!(dwarf_version, Some(5));
tracked!(emit_thin_lto, false);
@@ -807,6 +806,7 @@ fn test_unstable_options_tracking_hash() {
tracked!(no_jump_tables, true);
tracked!(no_link, true);
tracked!(no_profiler_runtime, true);
+ tracked!(no_trait_vptr, true);
tracked!(no_unique_section_names, true);
tracked!(oom, OomStrategy::Panic);
tracked!(osx_rpath_install_name, true);
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
index ad35dbbc8..0634e44c5 100644
--- a/compiler/rustc_interface/src/util.rs
+++ b/compiler/rustc_interface/src/util.rs
@@ -71,6 +71,7 @@ pub fn create_session(
>,
descriptions: Registry,
ice_file: Option<PathBuf>,
+ expanded_args: Vec<String>,
) -> (Session, Box<dyn CodegenBackend>) {
let codegen_backend = if let Some(make_codegen_backend) = make_codegen_backend {
make_codegen_backend(&sopts)
@@ -113,6 +114,7 @@ pub fn create_session(
target_override,
rustc_version_str().unwrap_or("unknown"),
ice_file,
+ expanded_args,
);
codegen_backend.init(&sess);
@@ -137,10 +139,8 @@ fn get_stack_size() -> Option<usize> {
env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
}
-#[cfg(not(parallel_compiler))]
-pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+pub(crate) fn run_in_thread_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
- _threads: usize,
f: F,
) -> R {
// The "thread pool" is a single spawned thread in the non-parallel
@@ -171,18 +171,37 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
})
}
+#[cfg(not(parallel_compiler))]
+pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+ edition: Edition,
+ _threads: usize,
+ f: F,
+) -> R {
+ run_in_thread_with_globals(edition, f)
+}
+
#[cfg(parallel_compiler)]
pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
threads: usize,
f: F,
) -> R {
- use rustc_data_structures::jobserver;
+ use rustc_data_structures::{jobserver, sync::FromDyn};
use rustc_middle::ty::tls;
use rustc_query_impl::QueryCtxt;
use rustc_query_system::query::{deadlock, QueryContext};
let registry = sync::Registry::new(threads);
+
+ if !sync::is_dyn_thread_safe() {
+ return run_in_thread_with_globals(edition, || {
+ // Register the thread for use with the `WorkerLocal` type.
+ registry.register();
+
+ f()
+ });
+ }
+
let mut builder = rayon::ThreadPoolBuilder::new()
.thread_name(|_| "rustc".to_string())
.acquire_thread_handler(jobserver::acquire_thread)
@@ -191,13 +210,13 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
.deadlock_handler(|| {
// On deadlock, creates a new thread and forwards information in thread
// locals to it. The new thread runs the deadlock handler.
- let query_map = tls::with(|tcx| {
+ let query_map = FromDyn::from(tls::with(|tcx| {
QueryCtxt::new(tcx)
.try_collect_active_jobs()
.expect("active jobs shouldn't be locked in deadlock handler")
- });
+ }));
let registry = rayon_core::Registry::current();
- thread::spawn(move || deadlock(query_map, &registry));
+ thread::spawn(move || deadlock(query_map.into_inner(), &registry));
});
if let Some(size) = get_stack_size() {
builder = builder.stack_size(size);
@@ -209,6 +228,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
// `Send` in the parallel compiler.
rustc_span::create_session_globals_then(edition, || {
rustc_span::with_session_globals(|session_globals| {
+ let session_globals = FromDyn::from(session_globals);
builder
.build_scoped(
// Initialize each new worker thread when created.
@@ -216,7 +236,9 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
// Register the thread for use with the `WorkerLocal` type.
registry.register();
- rustc_span::set_session_globals_then(session_globals, || thread.run())
+ rustc_span::set_session_globals_then(session_globals.into_inner(), || {
+ thread.run()
+ })
},
// Run `f` on the first thread in the thread pool.
move |pool: &rayon::ThreadPool| pool.install(f),
@@ -546,6 +568,13 @@ pub fn build_output_filenames(attrs: &[ast::Attribute], sess: &Session) -> Outpu
) {
sess.emit_fatal(errors::MultipleOutputTypesToStdout);
}
+
+ let crate_name = sess
+ .opts
+ .crate_name
+ .clone()
+ .or_else(|| rustc_attr::find_crate_name(attrs).map(|n| n.to_string()));
+
match sess.io.output_file {
None => {
// "-" as input file will cause the parser to read from stdin so we
@@ -554,15 +583,11 @@ pub fn build_output_filenames(attrs: &[ast::Attribute], sess: &Session) -> Outpu
let dirpath = sess.io.output_dir.clone().unwrap_or_default();
// If a crate name is present, we use it as the link name
- let stem = sess
- .opts
- .crate_name
- .clone()
- .or_else(|| rustc_attr::find_crate_name(attrs).map(|n| n.to_string()))
- .unwrap_or_else(|| sess.io.input.filestem().to_owned());
+ let stem = crate_name.clone().unwrap_or_else(|| sess.io.input.filestem().to_owned());
OutputFilenames::new(
dirpath,
+ crate_name.unwrap_or_else(|| stem.replace('-', "_")),
stem,
None,
sess.io.temps_dir.clone(),
@@ -587,9 +612,12 @@ pub fn build_output_filenames(attrs: &[ast::Attribute], sess: &Session) -> Outpu
sess.emit_warning(errors::IgnoringOutDir);
}
+ let out_filestem =
+ out_file.filestem().unwrap_or_default().to_str().unwrap().to_string();
OutputFilenames::new(
out_file.parent().unwrap_or_else(|| Path::new("")).to_path_buf(),
- out_file.filestem().unwrap_or_default().to_str().unwrap().to_string(),
+ crate_name.unwrap_or_else(|| out_filestem.replace('-', "_")),
+ out_filestem,
ofile,
sess.io.temps_dir.clone(),
sess.opts.cg.extra_filename.clone(),
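The `build_output_filenames` change hoists the crate-name lookup so both output paths share it, and falls back to the file stem with `-` mapped to `_` when no crate name is given. A small sketch of just that fallback, with hypothetical helper names:

```
fn output_names(crate_name: Option<&str>, input_filestem: &str) -> (String, String) {
    // Link stem: the crate name when known, otherwise the input file stem.
    let stem = crate_name.map(String::from).unwrap_or_else(|| input_filestem.to_owned());
    // Crate name: falls back to the stem with `-` replaced by `_`.
    let crate_name = crate_name.map(String::from).unwrap_or_else(|| stem.replace('-', "_"));
    (crate_name, stem)
}

fn main() {
    assert_eq!(
        output_names(None, "my-tool"),
        ("my_tool".to_string(), "my-tool".to_string())
    );
    assert_eq!(
        output_names(Some("mytool"), "ignored"),
        ("mytool".to_string(), "mytool".to_string())
    );
}
```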
diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl
index c4a7f7178..7377c6e2f 100644
--- a/compiler/rustc_lint/messages.ftl
+++ b/compiler/rustc_lint/messages.ftl
@@ -156,15 +156,8 @@ lint_builtin_unused_doc_comment = unused doc comment
lint_builtin_while_true = denote infinite loops with `loop {"{"} ... {"}"}`
.suggestion = use `loop`
-lint_check_name_deprecated = lint name `{$lint_name}` is deprecated and does not have an effect anymore. Use: {$new_name}
-
-lint_check_name_unknown = unknown lint: `{$lint_name}`
- .help = did you mean: `{$suggestion}`
-
lint_check_name_unknown_tool = unknown lint tool: `{$tool_name}`
-lint_check_name_warning = {$msg}
-
lint_command_line_source = `forbid` lint level was set on command line
lint_confusable_identifier_pair = found both `{$existing_sym}` and `{$sym}` as identifiers, which look alike
@@ -185,6 +178,7 @@ lint_default_source = `forbid` lint level is the default for {$id}
lint_deprecated_lint_name =
lint name `{$name}` is deprecated and may not have an effect in the future.
.suggestion = change it to
+ .help = change it to {$replace}
lint_diag_out_of_impl =
diagnostics should only be created in `IntoDiagnostic`/`AddToDiagnostic` impls
@@ -323,6 +317,8 @@ lint_invalid_reference_casting_assign_to_ref = assigning to `&T` is undefined be
lint_invalid_reference_casting_borrow_as_mut = casting `&T` to `&mut T` is undefined behavior, even if the reference is unused, consider instead using an `UnsafeCell`
    .label = casting happened here
+lint_invalid_reference_casting_note_book = for more information, visit <https://doc.rust-lang.org/book/ch15-05-interior-mutability.html>
+
lint_lintpass_by_hand = implementing `LintPass` by hand
.help = try using `declare_lint_pass!` or `impl_lint_pass!` instead
@@ -457,6 +453,8 @@ lint_ptr_null_checks_fn_ptr = function pointers are not nullable, so checking th
.help = wrap the function pointer inside an `Option` and use `Option::is_none` to check for null pointer value
.label = expression has type `{$orig_ty}`
+lint_ptr_null_checks_fn_ret = returned pointer of `{$fn_name}` call is never null, so checking it for null will always return false
+
lint_ptr_null_checks_ref = references are not nullable, so checking them for null will always return false
.label = expression has type `{$orig_ty}`
@@ -482,8 +480,11 @@ lint_redundant_semicolons =
*[false] this semicolon
}
-lint_renamed_or_removed_lint = {$msg}
+lint_removed_lint = lint `{$name}` has been removed: {$reason}
+
+lint_renamed_lint = lint `{$name}` has been renamed to `{$replace}`
.suggestion = use the new name
+ .help = use the new name `{$replace}`
lint_requested_level = requested on the command line with `{$level} {$lint_name}`
@@ -521,6 +522,7 @@ lint_unknown_gated_lint =
lint_unknown_lint =
unknown lint: `{$name}`
.suggestion = did you mean
+ .help = did you mean: `{$replace}`
lint_unknown_tool_in_scoped_lint = unknown tool name `{$tool_name}` found in scoped lint: `{$tool_name}::{$lint_name}`
.help = add `#![register_tool({$tool_name})]` to the crate root
diff --git a/compiler/rustc_lint/src/array_into_iter.rs b/compiler/rustc_lint/src/array_into_iter.rs
index d0967ba56..814991cd8 100644
--- a/compiler/rustc_lint/src/array_into_iter.rs
+++ b/compiler/rustc_lint/src/array_into_iter.rs
@@ -34,8 +34,8 @@ declare_lint! {
Warn,
"detects calling `into_iter` on arrays in Rust 2015 and 2018",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>",
reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>",
};
}
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index 4b6917fdf..536f78a73 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -41,7 +41,6 @@ use crate::{
},
EarlyContext, EarlyLintPass, LateContext, LateLintPass, Level, LintContext,
};
-use hir::IsAsync;
use rustc_ast::attr;
use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast::visit::{FnCtxt, FnKind};
@@ -845,8 +844,8 @@ declare_lint! {
Warn,
"detects anonymous parameters",
@future_incompatible = FutureIncompatibleInfo {
- reference: "issue #41686 <https://github.com/rust-lang/rust/issues/41686>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ reference: "issue #41686 <https://github.com/rust-lang/rust/issues/41686>",
};
}
@@ -1001,8 +1000,22 @@ impl EarlyLintPass for UnusedDocComment {
warn_if_doc(cx, arm_span, "match arms", &arm.attrs);
}
+ fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &ast::Pat) {
+ if let ast::PatKind::Struct(_, _, fields, _) = &pat.kind {
+ for field in fields {
+ warn_if_doc(cx, field.span, "pattern fields", &field.attrs);
+ }
+ }
+ }
+
fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
warn_if_doc(cx, expr.span, "expressions", &expr.attrs);
+
+ if let ExprKind::Struct(s) = &expr.kind {
+ for field in &s.fields {
+ warn_if_doc(cx, field.span, "expression fields", &field.attrs);
+ }
+ }
}
fn check_generic_param(&mut self, cx: &EarlyContext<'_>, param: &ast::GenericParam) {
@@ -1280,7 +1293,7 @@ impl<'tcx> LateLintPass<'tcx> for UngatedAsyncFnTrackCaller {
span: Span,
def_id: LocalDefId,
) {
- if fn_kind.asyncness() == IsAsync::Async
+ if fn_kind.asyncness().is_async()
&& !cx.tcx.features().async_fn_track_caller
// Now, check if the function has the `#[track_caller]` attribute
&& let Some(attr) = cx.tcx.get_attr(def_id, sym::track_caller)
@@ -1441,13 +1454,13 @@ impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
let hir::ItemKind::TyAlias(hir_ty, type_alias_generics) = &item.kind else { return };
- if cx.tcx.features().lazy_type_alias {
- // Bounds of lazy type aliases are respected.
+ // Bounds of lazy type aliases and TAITs are respected.
+ if cx.tcx.type_alias_is_lazy(item.owner_id) {
return;
}
let ty = cx.tcx.type_of(item.owner_id).skip_binder();
- if ty.has_opaque_types() || ty.has_inherent_projections() {
+ if ty.has_inherent_projections() {
// Bounds of type aliases that contain opaque types or inherent projections are respected.
// E.g: `type X = impl Trait;`, `type X = (impl Trait, Y);`, `type X = Type::Inherent;`.
return;
@@ -1656,8 +1669,8 @@ declare_lint! {
Warn,
"`...` range patterns are deprecated",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
};
}
@@ -1791,8 +1804,8 @@ declare_lint! {
Allow,
"detects edition keywords being used as an identifier",
@future_incompatible = FutureIncompatibleInfo {
- reference: "issue #49716 <https://github.com/rust-lang/rust/issues/49716>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ reference: "issue #49716 <https://github.com/rust-lang/rust/issues/49716>",
};
}
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
index f73797415..3c5cde430 100644
--- a/compiler/rustc_lint/src/context.rs
+++ b/compiler/rustc_lint/src/context.rs
@@ -16,10 +16,6 @@
use self::TargetLint::*;
-use crate::errors::{
- CheckNameDeprecated, CheckNameUnknown, CheckNameUnknownTool, CheckNameWarning, RequestedLevel,
- UnsupportedGroup,
-};
use crate::levels::LintLevelsBuilder;
use crate::passes::{EarlyLintPassObject, LateLintPassObject};
use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS;
@@ -124,9 +120,10 @@ pub enum CheckLintNameResult<'a> {
NoLint(Option<Symbol>),
/// The lint refers to a tool that has not been registered.
NoTool,
- /// The lint is either renamed or removed. This is the warning
- /// message, and an optional new name (`None` if removed).
- Warning(String, Option<String>),
+ /// The lint has been renamed to a new name.
+ Renamed(String),
+ /// The lint has been removed due to the given reason.
+ Removed(String),
/// The lint is from a tool. If the Option is None, then either
/// the lint does not exist in the tool or the code was not
/// compiled with the tool and therefore the lint was never
@@ -329,51 +326,6 @@ impl LintStore {
}
}
- /// Checks the validity of lint names derived from the command line.
- pub fn check_lint_name_cmdline(
- &self,
- sess: &Session,
- lint_name: &str,
- level: Level,
- registered_tools: &RegisteredTools,
- ) {
- let (tool_name, lint_name_only) = parse_lint_and_tool_name(lint_name);
- if lint_name_only == crate::WARNINGS.name_lower() && matches!(level, Level::ForceWarn(_)) {
- sess.emit_err(UnsupportedGroup { lint_group: crate::WARNINGS.name_lower() });
- return;
- }
- let lint_name = lint_name.to_string();
- match self.check_lint_name(lint_name_only, tool_name, registered_tools) {
- CheckLintNameResult::Warning(msg, _) => {
- sess.emit_warning(CheckNameWarning {
- msg,
- sub: RequestedLevel { level, lint_name },
- });
- }
- CheckLintNameResult::NoLint(suggestion) => {
- sess.emit_err(CheckNameUnknown {
- lint_name: lint_name.clone(),
- suggestion,
- sub: RequestedLevel { level, lint_name },
- });
- }
- CheckLintNameResult::Tool(Err((Some(_), new_name))) => {
- sess.emit_warning(CheckNameDeprecated {
- lint_name: lint_name.clone(),
- new_name,
- sub: RequestedLevel { level, lint_name },
- });
- }
- CheckLintNameResult::NoTool => {
- sess.emit_err(CheckNameUnknownTool {
- tool_name: tool_name.unwrap(),
- sub: RequestedLevel { level, lint_name },
- });
- }
- _ => {}
- };
- }
-
/// True if this symbol represents a lint group name.
pub fn is_lint_group(&self, lint_name: Symbol) -> bool {
debug!(
@@ -445,14 +397,8 @@ impl LintStore {
}
}
match self.by_name.get(&complete_name) {
- Some(Renamed(new_name, _)) => CheckLintNameResult::Warning(
- format!("lint `{complete_name}` has been renamed to `{new_name}`"),
- Some(new_name.to_owned()),
- ),
- Some(Removed(reason)) => CheckLintNameResult::Warning(
- format!("lint `{complete_name}` has been removed: {reason}"),
- None,
- ),
+ Some(Renamed(new_name, _)) => CheckLintNameResult::Renamed(new_name.to_string()),
+ Some(Removed(reason)) => CheckLintNameResult::Removed(reason.to_string()),
None => match self.lint_groups.get(&*complete_name) {
// If neither the lint, nor the lint group exists check if there is a `clippy::`
// variant of this lint
@@ -966,6 +912,14 @@ pub trait LintContext: Sized {
Applicability::MachineApplicable
);
}
+ BuiltinLintDiagnostics::AssociatedConstElidedLifetime { elided, span } => {
+ db.span_suggestion_verbose(
+ if elided { span.shrink_to_hi() } else { span },
+ "use the `'static` lifetime",
+ if elided { "'static " } else { "'static" },
+ Applicability::MachineApplicable
+ );
+ }
}
// Rewrap `db`, and pass control to the user.
decorate(db)
@@ -1361,6 +1315,91 @@ impl<'tcx> LateContext<'tcx> {
tcx.try_normalize_erasing_regions(self.param_env, proj).ok()
})
}
+
+ /// If the given expression is a local binding, find the initializer expression.
+ /// If that initializer expression is another local binding, find its initializer again.
+ ///
+ /// This process repeats as long as possible (but usually no more than once).
+    /// Type-check adjustments are not taken into account in this function.
+ ///
+ /// Examples:
+ /// ```
+ /// let abc = 1;
+ /// let def = abc + 2;
+ /// // ^^^^^^^ output
+ /// let def = def;
+ /// dbg!(def);
+ /// // ^^^ input
+ /// ```
+ pub fn expr_or_init<'a>(&self, mut expr: &'a hir::Expr<'tcx>) -> &'a hir::Expr<'tcx> {
+ expr = expr.peel_blocks();
+
+ while let hir::ExprKind::Path(ref qpath) = expr.kind
+ && let Some(parent_node) = match self.qpath_res(qpath, expr.hir_id) {
+ Res::Local(hir_id) => self.tcx.hir().find_parent(hir_id),
+ _ => None,
+ }
+ && let Some(init) = match parent_node {
+ hir::Node::Expr(expr) => Some(expr),
+ hir::Node::Local(hir::Local { init, .. }) => *init,
+ _ => None
+ }
+ {
+ expr = init.peel_blocks();
+ }
+ expr
+ }
+
+ /// If the given expression is a local binding, find the initializer expression.
+ /// If that initializer expression is another local or **outside** (`const`/`static`)
+ /// binding, find its initializer again.
+ ///
+ /// This process repeats as long as possible (but usually no more than once).
+    /// Type-check adjustments are not taken into account in this function.
+ ///
+ /// Examples:
+ /// ```
+ /// const ABC: i32 = 1;
+ /// // ^ output
+ /// let def = ABC;
+ /// dbg!(def);
+ /// // ^^^ input
+ ///
+ /// // or...
+ /// let abc = 1;
+ /// let def = abc + 2;
+ /// // ^^^^^^^ output
+ /// dbg!(def);
+ /// // ^^^ input
+ /// ```
+ pub fn expr_or_init_with_outside_body<'a>(
+ &self,
+ mut expr: &'a hir::Expr<'tcx>,
+ ) -> &'a hir::Expr<'tcx> {
+ expr = expr.peel_blocks();
+
+ while let hir::ExprKind::Path(ref qpath) = expr.kind
+ && let Some(parent_node) = match self.qpath_res(qpath, expr.hir_id) {
+ Res::Local(hir_id) => self.tcx.hir().find_parent(hir_id),
+ Res::Def(_, def_id) => self.tcx.hir().get_if_local(def_id),
+ _ => None,
+ }
+ && let Some(init) = match parent_node {
+ hir::Node::Expr(expr) => Some(expr),
+ hir::Node::Local(hir::Local { init, .. }) => *init,
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Const(.., body_id) | hir::ItemKind::Static(.., body_id) => {
+ Some(self.tcx.hir().body(body_id).value)
+ }
+ _ => None
+ }
+ _ => None
+ }
+ {
+ expr = init.peel_blocks();
+ }
+ expr
+ }
}
impl<'tcx> abi::HasDataLayout for LateContext<'tcx> {
@@ -1392,14 +1431,3 @@ impl<'tcx> LayoutOfHelpers<'tcx> for LateContext<'tcx> {
err
}
}
-
-pub fn parse_lint_and_tool_name(lint_name: &str) -> (Option<Symbol>, &str) {
- match lint_name.split_once("::") {
- Some((tool_name, lint_name)) => {
- let tool_name = Symbol::intern(tool_name);
-
- (Some(tool_name), lint_name)
- }
- None => (None, lint_name),
- }
-}
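`CheckLintNameResult::Warning(msg, Option<new_name>)` is split into `Renamed` and `Removed`, the command-line name checking now lives in `levels.rs` (see the `add_command_line` hunk further down), and the `parse_lint_and_tool_name` helper is dropped from `context.rs`. A standalone sketch of the resulting shape, with hypothetical lint entries:

```
enum CheckLintNameResult {
    Ok,
    Renamed(String),
    Removed(String),
}

fn check_lint_name(name: &str) -> CheckLintNameResult {
    match name {
        // Hypothetical entries for illustration only.
        "old_lint_name" => CheckLintNameResult::Renamed("new_lint_name".to_string()),
        "obsolete_lint" => CheckLintNameResult::Removed("no longer useful".to_string()),
        _ => CheckLintNameResult::Ok,
    }
}

/// Same splitting logic as the removed `parse_lint_and_tool_name` helper above.
fn parse_lint_and_tool_name(lint_name: &str) -> (Option<&str>, &str) {
    match lint_name.split_once("::") {
        Some((tool_name, lint_name)) => (Some(tool_name), lint_name),
        None => (None, lint_name),
    }
}

fn main() {
    assert_eq!(
        parse_lint_and_tool_name("clippy::needless_return"),
        (Some("clippy"), "needless_return")
    );
    match check_lint_name("old_lint_name") {
        CheckLintNameResult::Renamed(new_name) => println!("renamed to `{new_name}`"),
        CheckLintNameResult::Removed(reason) => println!("removed: {reason}"),
        CheckLintNameResult::Ok => {}
    }
}
```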
diff --git a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
index 851c6493d..9be2edf84 100644
--- a/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
+++ b/compiler/rustc_lint/src/deref_into_dyn_supertrait.rs
@@ -5,6 +5,7 @@ use crate::{
use rustc_hir as hir;
use rustc_middle::{traits::util::supertraits, ty};
+use rustc_session::lint::FutureIncompatibilityReason;
use rustc_span::sym;
declare_lint! {
@@ -48,6 +49,7 @@ declare_lint! {
Warn,
"`Deref` implementation usage with a supertrait trait object for output might be shadowed in the future",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #89460 <https://github.com/rust-lang/rust/issues/89460>",
};
}
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
index 211ea8f43..d102e3a6c 100644
--- a/compiler/rustc_lint/src/early.rs
+++ b/compiler/rustc_lint/src/early.rs
@@ -228,6 +228,7 @@ impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T>
}) => self.check_id(closure_id),
_ => {}
}
+ lint_callback!(self, check_expr_post, e);
}
fn visit_generic_arg(&mut self, arg: &'a ast::GenericArg) {
diff --git a/compiler/rustc_lint/src/errors.rs b/compiler/rustc_lint/src/errors.rs
index 68167487a..eccea35c7 100644
--- a/compiler/rustc_lint/src/errors.rs
+++ b/compiler/rustc_lint/src/errors.rs
@@ -1,7 +1,5 @@
use crate::fluent_generated as fluent;
-use rustc_errors::{
- AddToDiagnostic, Diagnostic, ErrorGuaranteed, Handler, IntoDiagnostic, SubdiagnosticMessage,
-};
+use rustc_errors::{AddToDiagnostic, Diagnostic, SubdiagnosticMessage};
use rustc_macros::{Diagnostic, Subdiagnostic};
use rustc_session::lint::Level;
use rustc_span::{Span, Symbol};
@@ -91,9 +89,9 @@ pub struct BuiltinEllipsisInclusiveRangePatterns {
#[derive(Subdiagnostic)]
#[note(lint_requested_level)]
-pub struct RequestedLevel {
+pub struct RequestedLevel<'a> {
pub level: Level,
- pub lint_name: String,
+ pub lint_name: &'a str,
}
#[derive(Diagnostic)]
@@ -102,50 +100,10 @@ pub struct UnsupportedGroup {
pub lint_group: String,
}
-pub struct CheckNameUnknown {
- pub lint_name: String,
- pub suggestion: Option<Symbol>,
- pub sub: RequestedLevel,
-}
-
-impl IntoDiagnostic<'_> for CheckNameUnknown {
- fn into_diagnostic(
- self,
- handler: &Handler,
- ) -> rustc_errors::DiagnosticBuilder<'_, ErrorGuaranteed> {
- let mut diag = handler.struct_err(fluent::lint_check_name_unknown);
- diag.code(rustc_errors::error_code!(E0602));
- if let Some(suggestion) = self.suggestion {
- diag.help(fluent::lint_help);
- diag.set_arg("suggestion", suggestion);
- }
- diag.set_arg("lint_name", self.lint_name);
- diag.subdiagnostic(self.sub);
- diag
- }
-}
-
#[derive(Diagnostic)]
#[diag(lint_check_name_unknown_tool, code = "E0602")]
-pub struct CheckNameUnknownTool {
+pub struct CheckNameUnknownTool<'a> {
pub tool_name: Symbol,
#[subdiagnostic]
- pub sub: RequestedLevel,
-}
-
-#[derive(Diagnostic)]
-#[diag(lint_check_name_warning)]
-pub struct CheckNameWarning {
- pub msg: String,
- #[subdiagnostic]
- pub sub: RequestedLevel,
-}
-
-#[derive(Diagnostic)]
-#[diag(lint_check_name_deprecated)]
-pub struct CheckNameDeprecated {
- pub lint_name: String,
- pub new_name: String,
- #[subdiagnostic]
- pub sub: RequestedLevel,
+ pub sub: RequestedLevel<'a>,
}
diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs
index 7b291d558..e1df69bda 100644
--- a/compiler/rustc_lint/src/foreign_modules.rs
+++ b/compiler/rustc_lint/src/foreign_modules.rs
@@ -5,19 +5,18 @@ use rustc_hir::def::DefKind;
use rustc_middle::query::Providers;
use rustc_middle::ty::layout::LayoutError;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_session::lint::{lint_array, LintArray};
use rustc_span::{sym, Span, Symbol};
use rustc_target::abi::FIRST_VARIANT;
use crate::lints::{BuiltinClashingExtern, BuiltinClashingExternSub};
-use crate::types;
+use crate::{types, LintVec};
pub(crate) fn provide(providers: &mut Providers) {
*providers = Providers { clashing_extern_declarations, ..*providers };
}
-pub(crate) fn get_lints() -> LintArray {
- lint_array!(CLASHING_EXTERN_DECLARATIONS)
+pub(crate) fn get_lints() -> LintVec {
+ vec![CLASHING_EXTERN_DECLARATIONS]
}
fn clashing_extern_declarations(tcx: TyCtxt<'_>, (): ()) {
diff --git a/compiler/rustc_lint/src/invalid_from_utf8.rs b/compiler/rustc_lint/src/invalid_from_utf8.rs
index 3291286ad..e398059ad 100644
--- a/compiler/rustc_lint/src/invalid_from_utf8.rs
+++ b/compiler/rustc_lint/src/invalid_from_utf8.rs
@@ -1,6 +1,6 @@
use std::str::Utf8Error;
-use rustc_ast::{BorrowKind, LitKind};
+use rustc_ast::LitKind;
use rustc_hir::{Expr, ExprKind};
use rustc_span::source_map::Spanned;
use rustc_span::sym;
@@ -11,7 +11,7 @@ use crate::{LateContext, LateLintPass, LintContext};
declare_lint! {
/// The `invalid_from_utf8_unchecked` lint checks for calls to
/// `std::str::from_utf8_unchecked` and `std::str::from_utf8_unchecked_mut`
- /// with an invalid UTF-8 literal.
+ /// with a known invalid UTF-8 value.
///
/// ### Example
///
@@ -36,7 +36,7 @@ declare_lint! {
declare_lint! {
/// The `invalid_from_utf8` lint checks for calls to
/// `std::str::from_utf8` and `std::str::from_utf8_mut`
- /// with an invalid UTF-8 literal.
+ /// with a known invalid UTF-8 value.
///
/// ### Example
///
@@ -67,8 +67,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidFromUtf8 {
&& [sym::str_from_utf8, sym::str_from_utf8_mut,
sym::str_from_utf8_unchecked, sym::str_from_utf8_unchecked_mut].contains(&diag_item)
{
- let lint = |utf8_error: Utf8Error| {
- let label = arg.span;
+ let lint = |label, utf8_error: Utf8Error| {
let method = diag_item.as_str().strip_prefix("str_").unwrap();
let method = format!("std::str::{method}");
let valid_up_to = utf8_error.valid_up_to();
@@ -78,22 +77,26 @@ impl<'tcx> LateLintPass<'tcx> for InvalidFromUtf8 {
if is_unchecked_variant { INVALID_FROM_UTF8_UNCHECKED } else { INVALID_FROM_UTF8 },
expr.span,
if is_unchecked_variant {
- InvalidFromUtf8Diag::Unchecked { method, valid_up_to, label }
+ InvalidFromUtf8Diag::Unchecked { method, valid_up_to, label }
} else {
- InvalidFromUtf8Diag::Checked { method, valid_up_to, label }
+ InvalidFromUtf8Diag::Checked { method, valid_up_to, label }
}
)
};
- match &arg.kind {
+ let mut init = cx.expr_or_init_with_outside_body(arg);
+ while let ExprKind::AddrOf(.., inner) = init.kind {
+ init = cx.expr_or_init_with_outside_body(inner);
+ }
+ match init.kind {
ExprKind::Lit(Spanned { node: lit, .. }) => {
if let LitKind::ByteStr(bytes, _) = &lit
&& let Err(utf8_error) = std::str::from_utf8(bytes)
{
- lint(utf8_error);
+ lint(init.span, utf8_error);
}
},
- ExprKind::AddrOf(BorrowKind::Ref, _, Expr { kind: ExprKind::Array(args), .. }) => {
+ ExprKind::Array(args) => {
let elements = args.iter().map(|e|{
match &e.kind {
ExprKind::Lit(Spanned { node: lit, .. }) => match lit {
@@ -108,7 +111,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidFromUtf8 {
if let Some(elements) = elements
&& let Err(utf8_error) = std::str::from_utf8(&elements)
{
- lint(utf8_error);
+ lint(init.span, utf8_error);
}
}
_ => {}
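The lint now walks back through `expr_or_init_with_outside_body` and peels `&` borrows before inspecting the bytes, and the diagnostic still reports `Utf8Error::valid_up_to`. A tiny std-only example of the error data involved:

```
fn main() {
    // A byte string that is not valid UTF-8: `\xF0` starts a 4-byte sequence
    // that is cut short by the ASCII 'c'.
    let bytes: &[u8] = b"ab\xF0\x90\x80cd";
    match std::str::from_utf8(bytes) {
        Ok(s) => println!("valid: {s}"),
        Err(utf8_error) => {
            // The lint uses this offset to point at the first invalid byte.
            println!("invalid from byte {}", utf8_error.valid_up_to());
            assert_eq!(utf8_error.valid_up_to(), 2);
        }
    }
}
```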
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
index 73af51d9e..6c8b60c8d 100644
--- a/compiler/rustc_lint/src/late.rs
+++ b/compiler/rustc_lint/src/late.rs
@@ -21,7 +21,6 @@ use rustc_data_structures::sync::join;
use rustc_hir as hir;
use rustc_hir::def_id::{LocalDefId, LocalModDefId};
use rustc_hir::intravisit as hir_visit;
-use rustc_hir::intravisit::Visitor;
use rustc_middle::hir::nested_filter;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::LintPass;
@@ -61,6 +60,9 @@ impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> {
self.context.last_node_with_lint_attrs = id;
debug!("late context: enter_attrs({:?})", attrs);
lint_callback!(self, enter_lint_attrs, attrs);
+ for attr in attrs {
+ lint_callback!(self, check_attribute, attr);
+ }
f(self);
debug!("late context: exit_attrs({:?})", attrs);
lint_callback!(self, exit_lint_attrs, attrs);
@@ -157,6 +159,10 @@ impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPas
hir_visit::walk_pat(self, p);
}
+ fn visit_expr_field(&mut self, field: &'tcx hir::ExprField<'tcx>) {
+ self.with_lint_attrs(field.hir_id, |cx| hir_visit::walk_expr_field(cx, field))
+ }
+
fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
ensure_sufficient_stack(|| {
self.with_lint_attrs(e.hir_id, |cx| {
@@ -377,20 +383,18 @@ fn late_lint_mod_inner<'tcx, T: LateLintPass<'tcx>>(
let (module, _span, hir_id) = tcx.hir().get_module(module_def_id);
- // There is no module lint that will have the crate itself as an item, so check it here.
- if hir_id == hir::CRATE_HIR_ID {
- lint_callback!(cx, check_crate,);
- }
+ cx.with_lint_attrs(hir_id, |cx| {
+ // There is no module lint that will have the crate itself as an item, so check it here.
+ if hir_id == hir::CRATE_HIR_ID {
+ lint_callback!(cx, check_crate,);
+ }
- cx.process_mod(module, hir_id);
+ cx.process_mod(module, hir_id);
- // Visit the crate attributes
- if hir_id == hir::CRATE_HIR_ID {
- for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() {
- cx.visit_attribute(attr)
+ if hir_id == hir::CRATE_HIR_ID {
+ lint_callback!(cx, check_crate_post,);
}
- lint_callback!(cx, check_crate_post,);
- }
+ });
}
fn late_lint_crate<'tcx>(tcx: TyCtxt<'tcx>) {
@@ -431,7 +435,6 @@ fn late_lint_crate_inner<'tcx, T: LateLintPass<'tcx>>(
// item), warn for it here.
lint_callback!(cx, check_crate,);
tcx.hir().walk_toplevel_module(cx);
- tcx.hir().walk_attributes(cx);
lint_callback!(cx, check_crate_post,);
})
}
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
index 1f4e5fa4d..ba521b969 100644
--- a/compiler/rustc_lint/src/levels.rs
+++ b/compiler/rustc_lint/src/levels.rs
@@ -1,18 +1,23 @@
+use crate::errors::{CheckNameUnknownTool, RequestedLevel, UnsupportedGroup};
+use crate::lints::{
+ DeprecatedLintNameFromCommandLine, RemovedLintFromCommandLine, RenamedLintFromCommandLine,
+ UnknownLintFromCommandLine,
+};
use crate::{
builtin::MISSING_DOCS,
context::{CheckLintNameResult, LintStore},
fluent_generated as fluent,
late::unerased_lint_store,
lints::{
- DeprecatedLintName, IgnoredUnlessCrateSpecified, OverruledAttributeLint,
- RenamedOrRemovedLint, RenamedOrRemovedLintSuggestion, UnknownLint, UnknownLintSuggestion,
+ DeprecatedLintName, IgnoredUnlessCrateSpecified, OverruledAttributeLint, RemovedLint,
+ RenamedLint, RenamedLintSuggestion, UnknownLint, UnknownLintSuggestion,
},
};
use rustc_ast as ast;
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{DecorateLint, DiagnosticBuilder, DiagnosticMessage, MultiSpan};
-use rustc_feature::Features;
+use rustc_feature::{Features, GateIssue};
use rustc_hir as hir;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::HirId;
@@ -24,12 +29,14 @@ use rustc_middle::lint::{
};
use rustc_middle::query::Providers;
use rustc_middle::ty::{RegisteredTools, TyCtxt};
-use rustc_session::lint::builtin::{RENAMED_AND_REMOVED_LINTS, UNKNOWN_LINTS, UNUSED_ATTRIBUTES};
use rustc_session::lint::{
- builtin::{self, FORBIDDEN_LINT_GROUPS, SINGLE_USE_LIFETIMES, UNFULFILLED_LINT_EXPECTATIONS},
+ builtin::{
+ self, FORBIDDEN_LINT_GROUPS, RENAMED_AND_REMOVED_LINTS, SINGLE_USE_LIFETIMES,
+ UNFULFILLED_LINT_EXPECTATIONS, UNKNOWN_LINTS, UNUSED_ATTRIBUTES,
+ },
Level, Lint, LintExpectationId, LintId,
};
-use rustc_session::parse::{add_feature_diagnostics, feature_err};
+use rustc_session::parse::feature_err;
use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
@@ -331,6 +338,11 @@ impl<'tcx> Visitor<'tcx> for LintLevelsBuilder<'_, LintLevelQueryMap<'tcx>> {
intravisit::walk_expr(self, e);
}
+ fn visit_expr_field(&mut self, f: &'tcx hir::ExprField<'tcx>) {
+ self.add_id(f.hir_id);
+ intravisit::walk_expr_field(self, f);
+ }
+
fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
self.add_id(s.hir_id);
intravisit::walk_field_def(self, s);
@@ -550,12 +562,55 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
fn add_command_line(&mut self) {
for &(ref lint_name, level) in &self.sess.opts.lint_opts {
- self.store.check_lint_name_cmdline(self.sess, &lint_name, level, self.registered_tools);
+ // Checks the validity of lint names derived from the command line.
+ let (tool_name, lint_name_only) = parse_lint_and_tool_name(lint_name);
+ if lint_name_only == crate::WARNINGS.name_lower()
+ && matches!(level, Level::ForceWarn(_))
+ {
+ self.sess.emit_err(UnsupportedGroup { lint_group: crate::WARNINGS.name_lower() });
+ }
+ match self.store.check_lint_name(lint_name_only, tool_name, self.registered_tools) {
+ CheckLintNameResult::Renamed(ref replace) => {
+ let name = lint_name.as_str();
+ let suggestion = RenamedLintSuggestion::WithoutSpan { replace };
+ let requested_level = RequestedLevel { level, lint_name };
+ let lint = RenamedLintFromCommandLine { name, suggestion, requested_level };
+ self.emit_lint(RENAMED_AND_REMOVED_LINTS, lint);
+ }
+ CheckLintNameResult::Removed(ref reason) => {
+ let name = lint_name.as_str();
+ let requested_level = RequestedLevel { level, lint_name };
+ let lint = RemovedLintFromCommandLine { name, reason, requested_level };
+ self.emit_lint(RENAMED_AND_REMOVED_LINTS, lint);
+ }
+ CheckLintNameResult::NoLint(suggestion) => {
+ let name = lint_name.clone();
+ let suggestion =
+ suggestion.map(|replace| UnknownLintSuggestion::WithoutSpan { replace });
+ let requested_level = RequestedLevel { level, lint_name };
+ let lint = UnknownLintFromCommandLine { name, suggestion, requested_level };
+ self.emit_lint(UNKNOWN_LINTS, lint);
+ }
+ CheckLintNameResult::Tool(Err((Some(_), ref replace))) => {
+ let name = lint_name.clone();
+ let requested_level = RequestedLevel { level, lint_name };
+ let lint = DeprecatedLintNameFromCommandLine { name, replace, requested_level };
+ self.emit_lint(RENAMED_AND_REMOVED_LINTS, lint);
+ }
+ CheckLintNameResult::NoTool => {
+ self.sess.emit_err(CheckNameUnknownTool {
+ tool_name: tool_name.unwrap(),
+ sub: RequestedLevel { level, lint_name },
+ });
+ }
+ _ => {}
+ };
+
let orig_level = level;
let lint_flag_val = Symbol::intern(lint_name);
let Ok(ids) = self.store.find_lints(&lint_name) else {
- // errors handled in check_lint_name_cmdline above
+ // errors already handled above
continue;
};
for id in ids {
@@ -566,7 +621,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
continue;
}
- if self.check_gated_lint(id, DUMMY_SP) {
+ if self.check_gated_lint(id, DUMMY_SP, true) {
let src = LintLevelSource::CommandLine(lint_flag_val, orig_level);
self.insert(id, (level, src));
}
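Illustrative only: with this change a renamed or removed lint passed on the command line goes through the regular lint machinery rather than the dedicated `check_lint_name_cmdline` path. Assuming the long-standing `bare_trait_object` -> `bare_trait_objects` rename registration is still present, the effect is roughly:

    // Invoking the compiler with `-D bare_trait_object` now emits the regular
    // `renamed_and_removed_lints` lint (via `RenamedLintFromCommandLine`), with a
    // `RequestedLevel` subdiagnostic noting that the level was requested on the
    // command line, instead of going through a hard-coded error path.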
@@ -837,7 +892,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
reason,
};
for &id in *ids {
- if self.check_gated_lint(id, attr.span) {
+ if self.check_gated_lint(id, attr.span, false) {
self.insert_spec(id, (level, src));
}
}
@@ -854,7 +909,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
reason,
};
for &id in ids {
- if self.check_gated_lint(id, attr.span) {
+ if self.check_gated_lint(id, attr.span, false) {
self.insert_spec(id, (level, src));
}
}
@@ -913,37 +968,37 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
_ if !self.warn_about_weird_lints => {}
- CheckLintNameResult::Warning(msg, renamed) => {
+ CheckLintNameResult::Renamed(ref replace) => {
let suggestion =
- renamed.as_ref().map(|replace| RenamedOrRemovedLintSuggestion {
- suggestion: sp,
- replace: replace.as_str(),
- });
- self.emit_spanned_lint(
- RENAMED_AND_REMOVED_LINTS,
- sp.into(),
- RenamedOrRemovedLint { msg, suggestion },
- );
+ RenamedLintSuggestion::WithSpan { suggestion: sp, replace };
+ let name = tool_ident.map(|tool| format!("{tool}::{name}")).unwrap_or(name);
+ let lint = RenamedLint { name: name.as_str(), suggestion };
+ self.emit_spanned_lint(RENAMED_AND_REMOVED_LINTS, sp.into(), lint);
+ }
+
+ CheckLintNameResult::Removed(ref reason) => {
+ let name = tool_ident.map(|tool| format!("{tool}::{name}")).unwrap_or(name);
+ let lint = RemovedLint { name: name.as_str(), reason };
+ self.emit_spanned_lint(RENAMED_AND_REMOVED_LINTS, sp.into(), lint);
}
+
CheckLintNameResult::NoLint(suggestion) => {
let name = if let Some(tool_ident) = tool_ident {
format!("{}::{}", tool_ident.name, name)
} else {
name.to_string()
};
- let suggestion = suggestion
- .map(|replace| UnknownLintSuggestion { suggestion: sp, replace });
- self.emit_spanned_lint(
- UNKNOWN_LINTS,
- sp.into(),
- UnknownLint { name, suggestion },
- );
+ let suggestion = suggestion.map(|replace| {
+ UnknownLintSuggestion::WithSpan { suggestion: sp, replace }
+ });
+ let lint = UnknownLint { name, suggestion };
+ self.emit_spanned_lint(UNKNOWN_LINTS, sp.into(), lint);
}
}
// If this lint was renamed, apply the new lint instead of ignoring the attribute.
// This happens outside of the match because the new lint should be applied even if
// we don't warn about the name change.
- if let CheckLintNameResult::Warning(_, Some(new_name)) = lint_result {
+ if let CheckLintNameResult::Renamed(new_name) = lint_result {
// Ignore any errors or warnings that happen because the new name is inaccurate
// NOTE: `new_name` already includes the tool name, so we don't have to add it again.
if let CheckLintNameResult::Ok(ids) =
@@ -955,7 +1010,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
reason,
};
for &id in ids {
- if self.check_gated_lint(id, attr.span) {
+ if self.check_gated_lint(id, attr.span, false) {
self.insert_spec(id, (level, src));
}
}
@@ -1000,7 +1055,7 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
// FIXME only emit this once for each attribute, instead of repeating it 4 times for
// pre-expansion lints, post-expansion lints, `shallow_lint_levels_on` and `lint_expectations`.
#[track_caller]
- fn check_gated_lint(&self, lint_id: LintId, span: Span) -> bool {
+ fn check_gated_lint(&self, lint_id: LintId, span: Span, lint_from_cli: bool) -> bool {
if let Some(feature) = lint_id.lint.feature_gate {
if !self.features.enabled(feature) {
let lint = builtin::UNKNOWN_LINTS;
@@ -1015,7 +1070,13 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
|lint| {
lint.set_arg("name", lint_id.lint.name_lower());
lint.note(fluent::lint_note);
- add_feature_diagnostics(lint, &self.sess.parse_sess, feature);
+ rustc_session::parse::add_feature_diagnostics_for_issue(
+ lint,
+ &self.sess.parse_sess,
+ feature,
+ GateIssue::Language,
+ lint_from_cli,
+ );
lint
},
);
@@ -1076,3 +1137,14 @@ impl<'s, P: LintLevelsProvider> LintLevelsBuilder<'s, P> {
pub(crate) fn provide(providers: &mut Providers) {
*providers = Providers { shallow_lint_levels_on, lint_expectations, ..*providers };
}
+
+pub fn parse_lint_and_tool_name(lint_name: &str) -> (Option<Symbol>, &str) {
+ match lint_name.split_once("::") {
+ Some((tool_name, lint_name)) => {
+ let tool_name = Symbol::intern(tool_name);
+
+ (Some(tool_name), lint_name)
+ }
+ None => (None, lint_name),
+ }
+}
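A short usage sketch of the new helper, mirroring the unit test that `tests.rs` now imports from this module (illustrative only):

    use rustc_span::{create_default_session_globals_then, Symbol};

    create_default_session_globals_then(|| {
        let (tool, name) = parse_lint_and_tool_name("clippy::needless_return");
        assert_eq!(tool, Some(Symbol::intern("clippy")));
        assert_eq!(name, "needless_return");
        assert_eq!(parse_lint_and_tool_name("dead_code"), (None, "dead_code"));
    });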
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
index 585b10e79..72c103f2d 100644
--- a/compiler/rustc_lint/src/lib.rs
+++ b/compiler/rustc_lint/src/lib.rs
@@ -40,7 +40,7 @@
#![recursion_limit = "256"]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_middle;
@@ -86,18 +86,14 @@ mod unused;
pub use array_into_iter::ARRAY_INTO_ITER;
-use rustc_ast as ast;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
-use rustc_hir as hir;
-use rustc_hir::def_id::{LocalDefId, LocalModDefId};
+use rustc_hir::def_id::LocalModDefId;
use rustc_middle::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::builtin::{
BARE_TRAIT_OBJECTS, ELIDED_LIFETIMES_IN_PATHS, EXPLICIT_OUTLIVES_REQUIREMENTS,
};
-use rustc_span::symbol::Ident;
-use rustc_span::Span;
use array_into_iter::ArrayIntoIter;
use builtin::*;
@@ -134,7 +130,7 @@ pub use late::{check_crate, late_lint_mod, unerased_lint_store};
pub use passes::{EarlyLintPass, LateLintPass};
pub use rustc_session::lint::Level::{self, *};
pub use rustc_session::lint::{BufferedEarlyLint, FutureIncompatibleInfo, Lint, LintId};
-pub use rustc_session::lint::{LintArray, LintPass};
+pub use rustc_session::lint::{LintPass, LintVec};
fluent_messages! { "../messages.ftl" }
@@ -200,7 +196,7 @@ late_lint_methods!(
BoxPointers: BoxPointers,
PathStatements: PathStatements,
LetUnderscore: LetUnderscore,
- InvalidReferenceCasting: InvalidReferenceCasting::default(),
+ InvalidReferenceCasting: InvalidReferenceCasting,
// Depends on referenced function signatures in expressions
UnusedResults: UnusedResults,
NonUpperCaseGlobals: NonUpperCaseGlobals,
@@ -500,6 +496,11 @@ fn register_builtins(store: &mut LintStore) {
"converted into hard error, see issue #82523 \
<https://github.com/rust-lang/rust/issues/82523> for more information",
);
+ store.register_removed(
+ "private_in_public",
+ "replaced with another group of lints, see RFC \
+ <https://rust-lang.github.io/rfcs/2145-type-privacy.html> for more information",
+ );
}
fn register_internals(store: &mut LintStore) {
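Illustrative only: once a lint is registered as removed, setting its level is no longer silently accepted. A sketch of what a downstream crate now sees, assuming the default `renamed_and_removed_lints` level:

    // A crate still carrying the old attribute:
    #![allow(private_in_public)]
    // With the registration above, this attribute triggers `renamed_and_removed_lints`,
    // pointing at the registered reason (the type-privacy RFC), instead of
    // configuring a lint that no longer exists.
    fn main() {}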
diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs
index 25982a458..c091c260a 100644
--- a/compiler/rustc_lint/src/lints.rs
+++ b/compiler/rustc_lint/src/lints.rs
@@ -2,6 +2,7 @@
#![allow(rustc::diagnostic_outside_of_impl)]
use std::num::NonZeroU32;
+use crate::errors::RequestedLevel;
use crate::fluent_generated as fluent;
use rustc_errors::{
AddToDiagnostic, Applicability, DecorateLint, DiagnosticMessage, DiagnosticStyledString,
@@ -634,6 +635,8 @@ pub enum PtrNullChecksDiag<'a> {
#[label]
label: Span,
},
+ #[diag(lint_ptr_null_checks_fn_ret)]
+ FnRet { fn_name: Ident },
}
// for_loops_over_fallibles.rs
@@ -764,11 +767,13 @@ pub enum InvalidFromUtf8Diag {
#[derive(LintDiagnostic)]
pub enum InvalidReferenceCastingDiag {
#[diag(lint_invalid_reference_casting_borrow_as_mut)]
+ #[note(lint_invalid_reference_casting_note_book)]
BorrowAsMut {
#[label]
orig_cast: Option<Span>,
},
#[diag(lint_invalid_reference_casting_assign_to_ref)]
+ #[note(lint_invalid_reference_casting_note_book)]
AssignToRef {
#[label]
orig_cast: Option<Span>,
@@ -1010,21 +1015,60 @@ pub struct DeprecatedLintName<'a> {
pub replace: &'a str,
}
-// FIXME: Non-translatable msg
#[derive(LintDiagnostic)]
-#[diag(lint_renamed_or_removed_lint)]
-pub struct RenamedOrRemovedLint<'a> {
- pub msg: &'a str,
+#[diag(lint_deprecated_lint_name)]
+#[help]
+pub struct DeprecatedLintNameFromCommandLine<'a> {
+ pub name: String,
+ pub replace: &'a str,
+ #[subdiagnostic]
+ pub requested_level: RequestedLevel<'a>,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_renamed_lint)]
+pub struct RenamedLint<'a> {
+ pub name: &'a str,
#[subdiagnostic]
- pub suggestion: Option<RenamedOrRemovedLintSuggestion<'a>>,
+ pub suggestion: RenamedLintSuggestion<'a>,
}
#[derive(Subdiagnostic)]
-#[suggestion(lint_suggestion, code = "{replace}", applicability = "machine-applicable")]
-pub struct RenamedOrRemovedLintSuggestion<'a> {
- #[primary_span]
- pub suggestion: Span,
- pub replace: &'a str,
+pub enum RenamedLintSuggestion<'a> {
+ #[suggestion(lint_suggestion, code = "{replace}", applicability = "machine-applicable")]
+ WithSpan {
+ #[primary_span]
+ suggestion: Span,
+ replace: &'a str,
+ },
+ #[help(lint_help)]
+ WithoutSpan { replace: &'a str },
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_renamed_lint)]
+pub struct RenamedLintFromCommandLine<'a> {
+ pub name: &'a str,
+ #[subdiagnostic]
+ pub suggestion: RenamedLintSuggestion<'a>,
+ #[subdiagnostic]
+ pub requested_level: RequestedLevel<'a>,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_removed_lint)]
+pub struct RemovedLint<'a> {
+ pub name: &'a str,
+ pub reason: &'a str,
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_removed_lint)]
+pub struct RemovedLintFromCommandLine<'a> {
+ pub name: &'a str,
+ pub reason: &'a str,
+ #[subdiagnostic]
+ pub requested_level: RequestedLevel<'a>,
}
#[derive(LintDiagnostic)]
@@ -1036,11 +1080,25 @@ pub struct UnknownLint {
}
#[derive(Subdiagnostic)]
-#[suggestion(lint_suggestion, code = "{replace}", applicability = "maybe-incorrect")]
-pub struct UnknownLintSuggestion {
- #[primary_span]
- pub suggestion: Span,
- pub replace: Symbol,
+pub enum UnknownLintSuggestion {
+ #[suggestion(lint_suggestion, code = "{replace}", applicability = "maybe-incorrect")]
+ WithSpan {
+ #[primary_span]
+ suggestion: Span,
+ replace: Symbol,
+ },
+ #[help(lint_help)]
+ WithoutSpan { replace: Symbol },
+}
+
+#[derive(LintDiagnostic)]
+#[diag(lint_unknown_lint, code = "E0602")]
+pub struct UnknownLintFromCommandLine<'a> {
+ pub name: String,
+ #[subdiagnostic]
+ pub suggestion: Option<UnknownLintSuggestion>,
+ #[subdiagnostic]
+ pub requested_level: RequestedLevel<'a>,
}
#[derive(LintDiagnostic)]
diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs
index bc0b9d6d8..cfbca6efb 100644
--- a/compiler/rustc_lint/src/noop_method_call.rs
+++ b/compiler/rustc_lint/src/noop_method_call.rs
@@ -98,6 +98,12 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, cx.param_env, did, args) else { return };
// (Re)check that it implements the noop diagnostic.
let Some(name) = cx.tcx.get_diagnostic_name(i.def_id()) else { return };
+ if !matches!(
+ name,
+ sym::noop_method_borrow | sym::noop_method_clone | sym::noop_method_deref
+ ) {
+ return;
+ }
let receiver_ty = cx.typeck_results().expr_ty(receiver);
let expr_ty = cx.typeck_results().expr_ty_adjusted(expr);
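Illustrative only: the early return above restricts the pass to the three `noop_method_*` diagnostic items. The canonical case it continues to flag looks like this, assuming default lint levels:

    struct NotClone;

    fn main() {
        let x = &NotClone;
        // `NotClone` has no `Clone` impl, so this resolves to `<&NotClone as Clone>::clone`
        // and merely copies the reference; `noop_method_call` targets exactly this.
        let _copy = x.clone();
    }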
diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs
index 16964565b..508f3e1ec 100644
--- a/compiler/rustc_lint/src/passes.rs
+++ b/compiler/rustc_lint/src/passes.rs
@@ -1,58 +1,53 @@
use crate::context::{EarlyContext, LateContext};
-use rustc_ast as ast;
-use rustc_hir as hir;
use rustc_session::lint::builtin::HardwiredLints;
use rustc_session::lint::LintPass;
-use rustc_span::def_id::LocalDefId;
-use rustc_span::symbol::Ident;
-use rustc_span::Span;
#[macro_export]
macro_rules! late_lint_methods {
($macro:path, $args:tt) => (
$macro!($args, [
- fn check_body(a: &'tcx hir::Body<'tcx>);
- fn check_body_post(a: &'tcx hir::Body<'tcx>);
+ fn check_body(a: &'tcx rustc_hir::Body<'tcx>);
+ fn check_body_post(a: &'tcx rustc_hir::Body<'tcx>);
fn check_crate();
fn check_crate_post();
- fn check_mod(a: &'tcx hir::Mod<'tcx>, b: hir::HirId);
- fn check_foreign_item(a: &'tcx hir::ForeignItem<'tcx>);
- fn check_item(a: &'tcx hir::Item<'tcx>);
- fn check_item_post(a: &'tcx hir::Item<'tcx>);
- fn check_local(a: &'tcx hir::Local<'tcx>);
- fn check_block(a: &'tcx hir::Block<'tcx>);
- fn check_block_post(a: &'tcx hir::Block<'tcx>);
- fn check_stmt(a: &'tcx hir::Stmt<'tcx>);
- fn check_arm(a: &'tcx hir::Arm<'tcx>);
- fn check_pat(a: &'tcx hir::Pat<'tcx>);
- fn check_expr(a: &'tcx hir::Expr<'tcx>);
- fn check_expr_post(a: &'tcx hir::Expr<'tcx>);
- fn check_ty(a: &'tcx hir::Ty<'tcx>);
- fn check_generic_param(a: &'tcx hir::GenericParam<'tcx>);
- fn check_generics(a: &'tcx hir::Generics<'tcx>);
- fn check_poly_trait_ref(a: &'tcx hir::PolyTraitRef<'tcx>);
+ fn check_mod(a: &'tcx rustc_hir::Mod<'tcx>, b: rustc_hir::HirId);
+ fn check_foreign_item(a: &'tcx rustc_hir::ForeignItem<'tcx>);
+ fn check_item(a: &'tcx rustc_hir::Item<'tcx>);
+ fn check_item_post(a: &'tcx rustc_hir::Item<'tcx>);
+ fn check_local(a: &'tcx rustc_hir::Local<'tcx>);
+ fn check_block(a: &'tcx rustc_hir::Block<'tcx>);
+ fn check_block_post(a: &'tcx rustc_hir::Block<'tcx>);
+ fn check_stmt(a: &'tcx rustc_hir::Stmt<'tcx>);
+ fn check_arm(a: &'tcx rustc_hir::Arm<'tcx>);
+ fn check_pat(a: &'tcx rustc_hir::Pat<'tcx>);
+ fn check_expr(a: &'tcx rustc_hir::Expr<'tcx>);
+ fn check_expr_post(a: &'tcx rustc_hir::Expr<'tcx>);
+ fn check_ty(a: &'tcx rustc_hir::Ty<'tcx>);
+ fn check_generic_param(a: &'tcx rustc_hir::GenericParam<'tcx>);
+ fn check_generics(a: &'tcx rustc_hir::Generics<'tcx>);
+ fn check_poly_trait_ref(a: &'tcx rustc_hir::PolyTraitRef<'tcx>);
fn check_fn(
a: rustc_hir::intravisit::FnKind<'tcx>,
- b: &'tcx hir::FnDecl<'tcx>,
- c: &'tcx hir::Body<'tcx>,
- d: Span,
- e: LocalDefId);
- fn check_trait_item(a: &'tcx hir::TraitItem<'tcx>);
- fn check_impl_item(a: &'tcx hir::ImplItem<'tcx>);
- fn check_impl_item_post(a: &'tcx hir::ImplItem<'tcx>);
- fn check_struct_def(a: &'tcx hir::VariantData<'tcx>);
- fn check_field_def(a: &'tcx hir::FieldDef<'tcx>);
- fn check_variant(a: &'tcx hir::Variant<'tcx>);
- fn check_path(a: &hir::Path<'tcx>, b: hir::HirId);
- fn check_attribute(a: &'tcx ast::Attribute);
+ b: &'tcx rustc_hir::FnDecl<'tcx>,
+ c: &'tcx rustc_hir::Body<'tcx>,
+ d: rustc_span::Span,
+ e: rustc_span::def_id::LocalDefId);
+ fn check_trait_item(a: &'tcx rustc_hir::TraitItem<'tcx>);
+ fn check_impl_item(a: &'tcx rustc_hir::ImplItem<'tcx>);
+ fn check_impl_item_post(a: &'tcx rustc_hir::ImplItem<'tcx>);
+ fn check_struct_def(a: &'tcx rustc_hir::VariantData<'tcx>);
+ fn check_field_def(a: &'tcx rustc_hir::FieldDef<'tcx>);
+ fn check_variant(a: &'tcx rustc_hir::Variant<'tcx>);
+ fn check_path(a: &rustc_hir::Path<'tcx>, b: rustc_hir::HirId);
+ fn check_attribute(a: &'tcx rustc_ast::Attribute);
/// Called when entering a syntax node that can have lint attributes such
/// as `#[allow(...)]`. Called with *all* the attributes of that node.
- fn enter_lint_attrs(a: &'tcx [ast::Attribute]);
+ fn enter_lint_attrs(a: &'tcx [rustc_ast::Attribute]);
/// Counterpart to `enter_lint_attrs`.
- fn exit_lint_attrs(a: &'tcx [ast::Attribute]);
+ fn exit_lint_attrs(a: &'tcx [rustc_ast::Attribute]);
]);
)
}
@@ -90,8 +85,8 @@ macro_rules! expand_combined_late_lint_pass_method {
#[macro_export]
macro_rules! expand_combined_late_lint_pass_methods {
($passes:tt, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
- $(fn $name(&mut self, context: &LateContext<'tcx>, $($param: $arg),*) {
- expand_combined_late_lint_pass_method!($passes, self, $name, (context, $($param),*));
+ $(fn $name(&mut self, context: &$crate::LateContext<'tcx>, $($param: $arg),*) {
+ $crate::expand_combined_late_lint_pass_method!($passes, self, $name, (context, $($param),*));
})*
)
}
@@ -116,19 +111,19 @@ macro_rules! declare_combined_late_lint_pass {
}
}
- $v fn get_lints() -> LintArray {
+ $v fn get_lints() -> $crate::LintVec {
let mut lints = Vec::new();
$(lints.extend_from_slice(&$pass::get_lints());)*
lints
}
}
- impl<'tcx> LateLintPass<'tcx> for $name {
- expand_combined_late_lint_pass_methods!([$($pass),*], $methods);
+ impl<'tcx> $crate::LateLintPass<'tcx> for $name {
+ $crate::expand_combined_late_lint_pass_methods!([$($pass),*], $methods);
}
#[allow(rustc::lint_pass_impl_without_macro)]
- impl LintPass for $name {
+ impl $crate::LintPass for $name {
fn name(&self) -> &'static str {
panic!()
}
@@ -140,41 +135,45 @@ macro_rules! declare_combined_late_lint_pass {
macro_rules! early_lint_methods {
($macro:path, $args:tt) => (
$macro!($args, [
- fn check_param(a: &ast::Param);
- fn check_ident(a: Ident);
- fn check_crate(a: &ast::Crate);
- fn check_crate_post(a: &ast::Crate);
- fn check_item(a: &ast::Item);
- fn check_item_post(a: &ast::Item);
- fn check_local(a: &ast::Local);
- fn check_block(a: &ast::Block);
- fn check_stmt(a: &ast::Stmt);
- fn check_arm(a: &ast::Arm);
- fn check_pat(a: &ast::Pat);
- fn check_pat_post(a: &ast::Pat);
- fn check_expr(a: &ast::Expr);
- fn check_ty(a: &ast::Ty);
- fn check_generic_arg(a: &ast::GenericArg);
- fn check_generic_param(a: &ast::GenericParam);
- fn check_generics(a: &ast::Generics);
- fn check_poly_trait_ref(a: &ast::PolyTraitRef);
- fn check_fn(a: rustc_ast::visit::FnKind<'_>, c: Span, d_: ast::NodeId);
- fn check_trait_item(a: &ast::AssocItem);
- fn check_impl_item(a: &ast::AssocItem);
- fn check_variant(a: &ast::Variant);
- fn check_attribute(a: &ast::Attribute);
- fn check_mac_def(a: &ast::MacroDef);
- fn check_mac(a: &ast::MacCall);
+ fn check_param(a: &rustc_ast::Param);
+ fn check_ident(a: rustc_span::symbol::Ident);
+ fn check_crate(a: &rustc_ast::Crate);
+ fn check_crate_post(a: &rustc_ast::Crate);
+ fn check_item(a: &rustc_ast::Item);
+ fn check_item_post(a: &rustc_ast::Item);
+ fn check_local(a: &rustc_ast::Local);
+ fn check_block(a: &rustc_ast::Block);
+ fn check_stmt(a: &rustc_ast::Stmt);
+ fn check_arm(a: &rustc_ast::Arm);
+ fn check_pat(a: &rustc_ast::Pat);
+ fn check_pat_post(a: &rustc_ast::Pat);
+ fn check_expr(a: &rustc_ast::Expr);
+ fn check_expr_post(a: &rustc_ast::Expr);
+ fn check_ty(a: &rustc_ast::Ty);
+ fn check_generic_arg(a: &rustc_ast::GenericArg);
+ fn check_generic_param(a: &rustc_ast::GenericParam);
+ fn check_generics(a: &rustc_ast::Generics);
+ fn check_poly_trait_ref(a: &rustc_ast::PolyTraitRef);
+ fn check_fn(
+ a: rustc_ast::visit::FnKind<'_>,
+ c: rustc_span::Span,
+ d_: rustc_ast::NodeId);
+ fn check_trait_item(a: &rustc_ast::AssocItem);
+ fn check_impl_item(a: &rustc_ast::AssocItem);
+ fn check_variant(a: &rustc_ast::Variant);
+ fn check_attribute(a: &rustc_ast::Attribute);
+ fn check_mac_def(a: &rustc_ast::MacroDef);
+ fn check_mac(a: &rustc_ast::MacCall);
/// Called when entering a syntax node that can have lint attributes such
/// as `#[allow(...)]`. Called with *all* the attributes of that node.
- fn enter_lint_attrs(a: &[ast::Attribute]);
+ fn enter_lint_attrs(a: &[rustc_ast::Attribute]);
/// Counterpart to `enter_lint_attrs`.
- fn exit_lint_attrs(a: &[ast::Attribute]);
+ fn exit_lint_attrs(a: &[rustc_ast::Attribute]);
- fn enter_where_predicate(a: &ast::WherePredicate);
- fn exit_where_predicate(a: &ast::WherePredicate);
+ fn enter_where_predicate(a: &rustc_ast::WherePredicate);
+ fn exit_where_predicate(a: &rustc_ast::WherePredicate);
]);
)
}
@@ -201,8 +200,8 @@ macro_rules! expand_combined_early_lint_pass_method {
#[macro_export]
macro_rules! expand_combined_early_lint_pass_methods {
($passes:tt, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
- $(fn $name(&mut self, context: &EarlyContext<'_>, $($param: $arg),*) {
- expand_combined_early_lint_pass_method!($passes, self, $name, (context, $($param),*));
+ $(fn $name(&mut self, context: &$crate::EarlyContext<'_>, $($param: $arg),*) {
+ $crate::expand_combined_early_lint_pass_method!($passes, self, $name, (context, $($param),*));
})*
)
}
@@ -227,19 +226,19 @@ macro_rules! declare_combined_early_lint_pass {
}
}
- $v fn get_lints() -> LintArray {
+ $v fn get_lints() -> $crate::LintVec {
let mut lints = Vec::new();
$(lints.extend_from_slice(&$pass::get_lints());)*
lints
}
}
- impl EarlyLintPass for $name {
- expand_combined_early_lint_pass_methods!([$($pass),*], $methods);
+ impl $crate::EarlyLintPass for $name {
+ $crate::expand_combined_early_lint_pass_methods!([$($pass),*], $methods);
}
#[allow(rustc::lint_pass_impl_without_macro)]
- impl LintPass for $name {
+ impl $crate::LintPass for $name {
fn name(&self) -> &'static str {
panic!()
}
diff --git a/compiler/rustc_lint/src/ptr_nulls.rs b/compiler/rustc_lint/src/ptr_nulls.rs
index 02aff9103..0de72d8d3 100644
--- a/compiler/rustc_lint/src/ptr_nulls.rs
+++ b/compiler/rustc_lint/src/ptr_nulls.rs
@@ -31,12 +31,30 @@ declare_lint! {
declare_lint_pass!(PtrNullChecks => [USELESS_PTR_NULL_CHECKS]);
-/// This function detects and returns the original expression from a series of consecutive casts,
-/// ie. `(my_fn as *const _ as *mut _).cast_mut()` would return the expression for `my_fn`.
-fn ptr_cast_chain<'a>(cx: &'a LateContext<'_>, mut e: &'a Expr<'a>) -> Option<&'a Expr<'a>> {
+/// This function checks whether the expression is the result of a series of consecutive casts,
+/// e.g. `(my_fn as *const _ as *mut _).cast_mut()`, and whether the original expression is
+/// a fn ptr, a reference, or a call to a function whose definition is
+/// annotated with `#[rustc_never_returns_null_ptr]`.
+/// If so, the function returns the appropriate diagnostic.
+fn incorrect_check<'a, 'tcx: 'a>(
+ cx: &'a LateContext<'tcx>,
+ mut e: &'a Expr<'a>,
+) -> Option<PtrNullChecksDiag<'tcx>> {
let mut had_at_least_one_cast = false;
loop {
e = e.peel_blocks();
+ if let ExprKind::MethodCall(_, _expr, [], _) = e.kind
+ && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
+ && cx.tcx.has_attr(def_id, sym::rustc_never_returns_null_ptr)
+ && let Some(fn_name) = cx.tcx.opt_item_ident(def_id) {
+ return Some(PtrNullChecksDiag::FnRet { fn_name });
+ } else if let ExprKind::Call(path, _args) = e.kind
+ && let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && cx.tcx.has_attr(def_id, sym::rustc_never_returns_null_ptr)
+ && let Some(fn_name) = cx.tcx.opt_item_ident(def_id) {
+ return Some(PtrNullChecksDiag::FnRet { fn_name });
+ }
e = if let ExprKind::Cast(expr, t) = e.kind
&& let TyKind::Ptr(_) = t.kind {
had_at_least_one_cast = true;
@@ -46,33 +64,21 @@ fn ptr_cast_chain<'a>(cx: &'a LateContext<'_>, mut e: &'a Expr<'a>) -> Option<&'
&& matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::ptr_cast | sym::ptr_cast_mut)) {
had_at_least_one_cast = true;
expr
- } else if let ExprKind::Call(path, [arg]) = e.kind
- && let ExprKind::Path(ref qpath) = path.kind
- && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
- && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::ptr_from_ref | sym::ptr_from_mut)) {
- had_at_least_one_cast = true;
- arg
} else if had_at_least_one_cast {
- return Some(e);
+ let orig_ty = cx.typeck_results().expr_ty(e);
+ return if orig_ty.is_fn() {
+ Some(PtrNullChecksDiag::FnPtr { orig_ty, label: e.span })
+ } else if orig_ty.is_ref() {
+ Some(PtrNullChecksDiag::Ref { orig_ty, label: e.span })
+ } else {
+ None
+ };
} else {
return None;
};
}
}
-fn incorrect_check<'a>(cx: &LateContext<'a>, expr: &Expr<'_>) -> Option<PtrNullChecksDiag<'a>> {
- let expr = ptr_cast_chain(cx, expr)?;
-
- let orig_ty = cx.typeck_results().expr_ty(expr);
- if orig_ty.is_fn() {
- Some(PtrNullChecksDiag::FnPtr { orig_ty, label: expr.span })
- } else if orig_ty.is_ref() {
- Some(PtrNullChecksDiag::Ref { orig_ty, label: expr.span })
- } else {
- None
- }
-}
-
impl<'tcx> LateLintPass<'tcx> for PtrNullChecks {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
match expr.kind {
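Illustrative only, and assuming `str::as_ptr` is among the standard-library functions annotated with `#[rustc_never_returns_null_ptr]` in this release: the new `FnRet` variant lets the lint flag null checks on such return values, not just on casts of references and fn pointers:

    fn main() {
        let s = "hello";
        // `as_ptr` can never return a null pointer, so this check is reported
        // by `useless_ptr_null_checks` as always evaluating to `false`.
        if s.as_ptr().is_null() {
            unreachable!();
        }
    }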
diff --git a/compiler/rustc_lint/src/reference_casting.rs b/compiler/rustc_lint/src/reference_casting.rs
index 2577cabb3..39def599b 100644
--- a/compiler/rustc_lint/src/reference_casting.rs
+++ b/compiler/rustc_lint/src/reference_casting.rs
@@ -1,8 +1,7 @@
use rustc_ast::Mutability;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_hir::{def::Res, Expr, ExprKind, HirId, Local, QPath, StmtKind, UnOp};
+use rustc_hir::{Expr, ExprKind, UnOp};
use rustc_middle::ty::{self, TypeAndMut};
-use rustc_span::{sym, Span};
+use rustc_span::sym;
use crate::{lints::InvalidReferenceCastingDiag, LateContext, LateLintPass, LintContext};
@@ -34,51 +33,18 @@ declare_lint! {
"casts of `&T` to `&mut T` without interior mutability"
}
-#[derive(Default)]
-pub struct InvalidReferenceCasting {
- casted: FxHashMap<HirId, Span>,
-}
-
-impl_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]);
+declare_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]);
impl<'tcx> LateLintPass<'tcx> for InvalidReferenceCasting {
- fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx rustc_hir::Stmt<'tcx>) {
- let StmtKind::Local(local) = stmt.kind else {
- return;
- };
- let Local { init: Some(init), els: None, .. } = local else {
- return;
- };
-
- if is_cast_from_const_to_mut(cx, init) {
- self.casted.insert(local.pat.hir_id, init.span);
- }
- }
-
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) {
- // &mut <expr>
- let inner = if let ExprKind::AddrOf(_, Mutability::Mut, expr) = expr.kind {
- expr
- // <expr> = ...
- } else if let ExprKind::Assign(expr, _, _) = expr.kind {
- expr
- // <expr> += ...
- } else if let ExprKind::AssignOp(_, expr, _) = expr.kind {
- expr
- } else {
+ let Some((is_assignment, e)) = is_operation_we_care_about(cx, expr) else {
return;
};
- let ExprKind::Unary(UnOp::Deref, e) = &inner.kind else {
- return;
- };
+ let init = cx.expr_or_init(e);
- let orig_cast = if is_cast_from_const_to_mut(cx, e) {
- None
- } else if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind
- && let Res::Local(hir_id) = &path.res
- && let Some(orig_cast) = self.casted.get(hir_id) {
- Some(*orig_cast)
+ let orig_cast = if is_cast_from_const_to_mut(cx, init) {
+ if init.span != e.span { Some(init.span) } else { None }
} else {
return;
};
@@ -86,84 +52,113 @@ impl<'tcx> LateLintPass<'tcx> for InvalidReferenceCasting {
cx.emit_spanned_lint(
INVALID_REFERENCE_CASTING,
expr.span,
- if matches!(expr.kind, ExprKind::AddrOf(..)) {
- InvalidReferenceCastingDiag::BorrowAsMut { orig_cast }
- } else {
+ if is_assignment {
InvalidReferenceCastingDiag::AssignToRef { orig_cast }
+ } else {
+ InvalidReferenceCastingDiag::BorrowAsMut { orig_cast }
},
);
}
}
-fn is_cast_from_const_to_mut<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'tcx>) -> bool {
- let e = e.peel_blocks();
-
- fn from_casts<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'tcx>) -> Option<&'tcx Expr<'tcx>> {
- // <expr> as *mut ...
- let mut e = if let ExprKind::Cast(e, t) = e.kind
- && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(t.hir_id).kind() {
- e
- // <expr>.cast_mut()
- } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
- && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
- && cx.tcx.is_diagnostic_item(sym::ptr_cast_mut, def_id) {
+fn is_operation_we_care_about<'tcx>(
+ cx: &LateContext<'tcx>,
+ e: &'tcx Expr<'tcx>,
+) -> Option<(bool, &'tcx Expr<'tcx>)> {
+ fn deref_assign_or_addr_of<'tcx>(expr: &'tcx Expr<'tcx>) -> Option<(bool, &'tcx Expr<'tcx>)> {
+ // &mut <expr>
+ let inner = if let ExprKind::AddrOf(_, Mutability::Mut, expr) = expr.kind {
+ expr
+ // <expr> = ...
+ } else if let ExprKind::Assign(expr, _, _) = expr.kind {
+ expr
+ // <expr> += ...
+ } else if let ExprKind::AssignOp(_, expr, _) = expr.kind {
expr
} else {
return None;
};
- let mut had_at_least_one_cast = false;
- loop {
- e = e.peel_blocks();
- // <expr> as *mut/const ... or <expr> as <uint>
- e = if let ExprKind::Cast(expr, t) = e.kind
- && matches!(cx.typeck_results().node_type(t.hir_id).kind(), ty::RawPtr(_) | ty::Uint(_)) {
- had_at_least_one_cast = true;
- expr
- // <expr>.cast(), <expr>.cast_mut() or <expr>.cast_const()
- } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
- && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
- && matches!(
- cx.tcx.get_diagnostic_name(def_id),
- Some(sym::ptr_cast | sym::const_ptr_cast | sym::ptr_cast_mut | sym::ptr_cast_const)
- )
- {
- had_at_least_one_cast = true;
- expr
- // ptr::from_ref(<expr>)
- } else if let ExprKind::Call(path, [arg]) = e.kind
- && let ExprKind::Path(ref qpath) = path.kind
- && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
- && cx.tcx.is_diagnostic_item(sym::ptr_from_ref, def_id) {
- return Some(arg);
- } else if had_at_least_one_cast {
- return Some(e);
- } else {
- return None;
- };
+ if let ExprKind::Unary(UnOp::Deref, e) = &inner.kind {
+ Some((!matches!(expr.kind, ExprKind::AddrOf(..)), e))
+ } else {
+ None
}
}
- fn from_transmute<'tcx>(
+ fn ptr_write<'tcx>(
cx: &LateContext<'tcx>,
e: &'tcx Expr<'tcx>,
- ) -> Option<&'tcx Expr<'tcx>> {
- // mem::transmute::<_, *mut _>(<expr>)
- if let ExprKind::Call(path, [arg]) = e.kind
+ ) -> Option<(bool, &'tcx Expr<'tcx>)> {
+ if let ExprKind::Call(path, [arg_ptr, _arg_val]) = e.kind
&& let ExprKind::Path(ref qpath) = path.kind
&& let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
- && cx.tcx.is_diagnostic_item(sym::transmute, def_id)
- && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(e.hir_id).kind() {
- Some(arg)
+ && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::ptr_write | sym::ptr_write_volatile | sym::ptr_write_unaligned))
+ {
+ Some((true, arg_ptr))
} else {
None
}
}
- let Some(e) = from_casts(cx, e).or_else(|| from_transmute(cx, e)) else {
+ deref_assign_or_addr_of(e).or_else(|| ptr_write(cx, e))
+}
+
+fn is_cast_from_const_to_mut<'tcx>(cx: &LateContext<'tcx>, orig_expr: &'tcx Expr<'tcx>) -> bool {
+ let mut need_check_freeze = false;
+ let mut e = orig_expr;
+
+ let end_ty = cx.typeck_results().node_type(orig_expr.hir_id);
+
+ // Bail out early if the end type is **not** a mutable pointer.
+ if !matches!(end_ty.kind(), ty::RawPtr(TypeAndMut { ty: _, mutbl: Mutability::Mut })) {
return false;
- };
+ }
+
+ loop {
+ e = e.peel_blocks();
+ // <expr> as ...
+ e = if let ExprKind::Cast(expr, _) = e.kind {
+ expr
+ // <expr>.cast(), <expr>.cast_mut() or <expr>.cast_const()
+ } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind
+ && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id)
+ && matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_cast | sym::const_ptr_cast | sym::ptr_cast_mut | sym::ptr_cast_const)
+ )
+ {
+ expr
+ // ptr::from_ref(<expr>), UnsafeCell::raw_get(<expr>) or mem::transmute<_, _>(<expr>)
+ } else if let ExprKind::Call(path, [arg]) = e.kind
+ && let ExprKind::Path(ref qpath) = path.kind
+ && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id()
+ && matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_from_ref | sym::unsafe_cell_raw_get | sym::transmute)
+ )
+ {
+ if cx.tcx.is_diagnostic_item(sym::unsafe_cell_raw_get, def_id) {
+ need_check_freeze = true;
+ }
+ arg
+ } else {
+ break;
+ };
+ }
- let e = e.peel_blocks();
- matches!(cx.typeck_results().node_type(e.hir_id).kind(), ty::Ref(_, _, Mutability::Not))
+ let start_ty = cx.typeck_results().node_type(e.hir_id);
+ if let ty::Ref(_, inner_ty, Mutability::Not) = start_ty.kind() {
+ // If an UnsafeCell method is involved we need to additionally check the
+ // inner type for the presence of the Freeze trait (i.e. it does NOT contain
+ // an UnsafeCell), since in that case we would incorrectly lint on valid casts.
+ //
+ // We also consider non-concrete skeleton types (i.e. generics)
+ // to be an issue since there is no way to make it safe for arbitrary types.
+ !need_check_freeze
+ || inner_ty.is_freeze(cx.tcx, cx.param_env)
+ || !inner_ty.has_concrete_skeleton()
+ } else {
+ false
+ }
}
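Illustrative only: with `expr_or_init` and the `ptr_write` handling above, the lint now sees through a `let` binding and treats `ptr::write` like an assignment. A sketch of code that is flagged:

    fn main() {
        let x = 0_i32;
        let ptr = &x as *const i32 as *mut i32;
        unsafe {
            // Both statements are reported as `invalid_reference_casting`: the
            // initializer of `ptr` is resolved via `expr_or_init`, and
            // `ptr::write` counts as writing through the cast pointer.
            *ptr = 1;
            std::ptr::write(ptr, 2);
        }
    }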
diff --git a/compiler/rustc_lint/src/tests.rs b/compiler/rustc_lint/src/tests.rs
index fc9d6f636..4fd054cb7 100644
--- a/compiler/rustc_lint/src/tests.rs
+++ b/compiler/rustc_lint/src/tests.rs
@@ -1,4 +1,4 @@
-use crate::context::parse_lint_and_tool_name;
+use crate::levels::parse_lint_and_tool_name;
use rustc_span::{create_default_session_globals_then, Symbol};
#[test]
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
index 56508a2a6..e812493b3 100644
--- a/compiler/rustc_lint/src/traits.rs
+++ b/compiler/rustc_lint/src/traits.rs
@@ -96,7 +96,7 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
};
let def_id = trait_predicate.trait_ref.def_id;
if cx.tcx.lang_items().drop_trait() == Some(def_id) {
- // Explicitly allow `impl Drop`, a drop-guards-as-Voldemort-type pattern.
+ // Explicitly allow `impl Drop`, a drop-guards-as-unnameable-type pattern.
if trait_predicate.trait_ref.self_ty().is_impl_trait() {
continue;
}
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 1ba746edd..44cf1591c 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -804,7 +804,7 @@ pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
}
-/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
+/// `repr(transparent)` structs can have a single non-1-ZST field; this function returns that
/// field.
pub fn transparent_newtype_field<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
@@ -813,8 +813,8 @@ pub fn transparent_newtype_field<'a, 'tcx>(
let param_env = tcx.param_env(variant.def_id);
variant.fields.iter().find(|field| {
let field_ty = tcx.type_of(field.did).instantiate_identity();
- let is_zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_zst());
- !is_zst
+ let is_1zst = tcx.layout_of(param_env.and(field_ty)).is_ok_and(|layout| layout.is_1zst());
+ !is_1zst
})
}
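Illustrative only: the 1-ZST distinction matters for fields that have size zero but alignment greater than one. `[u16; 0]` is such a type: a ZST, but not a 1-ZST, so with this change it is the field `transparent_newtype_field` returns for the FFI checks rather than being skipped:

    #[repr(transparent)]
    pub struct ZstAlign2(pub [u16; 0]); // size 0, alignment 2: a ZST, but not a 1-ZST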
@@ -917,13 +917,18 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
// At this point, the field's type is known to be nonnull and the parent enum is Option-like.
// If the computed size for the field and the enum are different, the nonnull optimization isn't
// being applied (and we've got a problem somewhere).
- let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, param_env).unwrap();
- if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
+ let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, param_env).ok();
+ if !compute_size_skeleton(ty)?.same_size(compute_size_skeleton(field_ty)?) {
bug!("improper_ctypes: Option nonnull optimization not applied?");
}
// Return the nullable type this Option-like enum can be safely represented with.
- let field_ty_abi = &tcx.layout_of(param_env.and(field_ty)).unwrap().abi;
+ let field_ty_layout = tcx.layout_of(param_env.and(field_ty));
+ if field_ty_layout.is_err() && !field_ty.has_non_region_param() {
+ bug!("should be able to compute the layout of non-polymorphic type");
+ }
+
+ let field_ty_abi = &field_ty_layout.ok()?.abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
match field_ty_scalar.valid_range(&tcx) {
WrappingRange { start: 0, end }
@@ -1266,7 +1271,6 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Placeholder(..)
| ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
}
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
index 6041f8075..d5beff4f1 100644
--- a/compiler/rustc_lint/src/unused.rs
+++ b/compiler/rustc_lint/src/unused.rs
@@ -816,8 +816,7 @@ trait UnusedDelimLint {
let (value, ctx, followed_by_block, left_pos, right_pos, is_kw) = match e.kind {
// Do not lint `unused_braces` in `if let` expressions.
If(ref cond, ref block, _)
- if !matches!(cond.kind, Let(_, _, _))
- || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
+ if !matches!(cond.kind, Let(..)) || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
{
let left = e.span.lo() + rustc_span::BytePos(2);
let right = block.span.lo();
@@ -826,8 +825,7 @@ trait UnusedDelimLint {
// Do not lint `unused_braces` in `while let` expressions.
While(ref cond, ref block, ..)
- if !matches!(cond.kind, Let(_, _, _))
- || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
+ if !matches!(cond.kind, Let(..)) || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
{
let left = e.span.lo() + rustc_span::BytePos(5);
let right = block.span.lo();
@@ -955,11 +953,14 @@ declare_lint! {
pub struct UnusedParens {
with_self_ty_parens: bool,
+ /// `1 as (i32) < 2` parses as `ExprKind::Lt`, while
+ /// `1 as i32 < 2` parses as `i32::<2`, i.e. the start of a generic argument list with a missing closing angle bracket
+ parens_in_cast_in_lt: Vec<ast::NodeId>,
}
impl UnusedParens {
pub fn new() -> Self {
- Self { with_self_ty_parens: false }
+ Self { with_self_ty_parens: false, parens_in_cast_in_lt: Vec::new() }
}
}
@@ -1000,7 +1001,7 @@ impl UnusedDelimLint for UnusedParens {
self.emit_unused_delims_expr(cx, value, ctx, left_pos, right_pos, is_kw)
}
}
- ast::ExprKind::Let(_, ref expr, _) => {
+ ast::ExprKind::Let(_, ref expr, _, _) => {
self.check_unused_delims_expr(
cx,
expr,
@@ -1055,8 +1056,16 @@ impl UnusedParens {
impl EarlyLintPass for UnusedParens {
#[inline]
fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ if let ExprKind::Binary(op, lhs, _rhs) = &e.kind &&
+ (op.node == ast::BinOpKind::Lt || op.node == ast::BinOpKind::Shl) &&
+ let ExprKind::Cast(_expr, ty) = &lhs.kind &&
+ let ast::TyKind::Paren(_) = &ty.kind
+ {
+ self.parens_in_cast_in_lt.push(ty.id);
+ }
+
match e.kind {
- ExprKind::Let(ref pat, _, _) | ExprKind::ForLoop(ref pat, ..) => {
+ ExprKind::Let(ref pat, _, _, _) | ExprKind::ForLoop(ref pat, ..) => {
self.check_unused_parens_pat(cx, pat, false, false, (true, true));
}
// We ignore parens in cases like `if (((let Some(0) = Some(1))))` because we already
@@ -1101,6 +1110,17 @@ impl EarlyLintPass for UnusedParens {
<Self as UnusedDelimLint>::check_expr(self, cx, e)
}
+ fn check_expr_post(&mut self, _cx: &EarlyContext<'_>, e: &ast::Expr) {
+ if let ExprKind::Binary(op, lhs, _rhs) = &e.kind &&
+ (op.node == ast::BinOpKind::Lt || op.node == ast::BinOpKind::Shl) &&
+ let ExprKind::Cast(_expr, ty) = &lhs.kind &&
+ let ast::TyKind::Paren(_) = &ty.kind
+ {
+ let id = self.parens_in_cast_in_lt.pop().expect("check_expr and check_expr_post must balance");
+ assert_eq!(id, ty.id, "check_expr, check_ty, and check_expr_post are called, in that order, by the visitor");
+ }
+ }
+
fn check_pat(&mut self, cx: &EarlyContext<'_>, p: &ast::Pat) {
use ast::{Mutability, PatKind::*};
let keep_space = (false, false);
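Illustrative only: the bookkeeping above exists because parentheses around the target type of a cast on the left-hand side of `<` or `<<` are load-bearing. A sketch of the two cases:

    fn main() {
        let x = 1_u64;
        // Removing these parentheses changes the parse: `x as u32 < 2` would start
        // a generic-argument list (`u32::<2`) and fail, so `unused_parens` must not
        // fire on this `TyKind::Paren`.
        let _kept = x as (u32) < 2;
        // Elsewhere, parentheses around a type are still reported as unnecessary.
        let _lint: (u32) = 3;
    }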
@@ -1141,6 +1161,11 @@ impl EarlyLintPass for UnusedParens {
}
fn check_ty(&mut self, cx: &EarlyContext<'_>, ty: &ast::Ty) {
+ if let ast::TyKind::Paren(_) = ty.kind &&
+ Some(&ty.id) == self.parens_in_cast_in_lt.last()
+ {
+ return;
+ }
match &ty.kind {
ast::TyKind::Array(_, len) => {
self.check_unused_delims_expr(
@@ -1284,7 +1309,7 @@ impl UnusedDelimLint for UnusedBraces {
}
}
}
- ast::ExprKind::Let(_, ref expr, _) => {
+ ast::ExprKind::Let(_, ref expr, _, _) => {
self.check_unused_delims_expr(
cx,
expr,
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
index 96c31a90d..69b462d32 100644
--- a/compiler/rustc_lint_defs/src/builtin.rs
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -39,6 +39,7 @@ declare_lint! {
Warn,
"applying forbid to lint-groups",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #81670 <https://github.com/rust-lang/rust/issues/81670>",
};
}
@@ -74,6 +75,7 @@ declare_lint! {
Deny,
"ill-formed attribute inputs that were previously accepted and used in practice",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #57571 <https://github.com/rust-lang/rust/issues/57571>",
};
crate_level_only
@@ -110,6 +112,7 @@ declare_lint! {
Deny,
"conflicts between `#[repr(..)]` hints that were previously accepted and used in practice",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #68585 <https://github.com/rust-lang/rust/issues/68585>",
};
}
@@ -303,6 +306,7 @@ declare_lint! {
/// pub async fn uhoh() {
/// let guard = SyncThing {};
/// yield_now().await;
+ /// let _guard = guard;
/// }
/// ```
///
@@ -983,44 +987,6 @@ declare_lint! {
}
declare_lint! {
- /// The `private_in_public` lint detects private items in public
- /// interfaces not caught by the old implementation.
- ///
- /// ### Example
- ///
- /// ```rust
- /// # #![allow(unused)]
- /// struct SemiPriv;
- ///
- /// mod m1 {
- /// struct Priv;
- /// impl super::SemiPriv {
- /// pub fn f(_: Priv) {}
- /// }
- /// }
- /// # fn main() {}
- /// ```
- ///
- /// {{produces}}
- ///
- /// ### Explanation
- ///
- /// The visibility rules are intended to prevent exposing private items in
- /// public interfaces. This is a [future-incompatible] lint to transition
- /// this to a hard error in the future. See [issue #34537] for more
- /// details.
- ///
- /// [issue #34537]: https://github.com/rust-lang/rust/issues/34537
- /// [future-incompatible]: ../index.md#future-incompatible-lints
- pub PRIVATE_IN_PUBLIC,
- Warn,
- "detect private items in public interfaces not caught by the old implementation",
- @future_incompatible = FutureIncompatibleInfo {
- reference: "issue #34537 <https://github.com/rust-lang/rust/issues/34537>",
- };
-}
-
-declare_lint! {
/// The `invalid_alignment` lint detects dereferences of misaligned pointers during
/// constant evaluation.
///
@@ -1054,8 +1020,8 @@ declare_lint! {
Deny,
"raw pointers must be aligned before dereferencing",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #68585 <https://github.com/rust-lang/rust/issues/104616>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -1134,6 +1100,7 @@ declare_lint! {
Deny,
"detect public re-exports of private extern crates",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #34537 <https://github.com/rust-lang/rust/issues/34537>",
};
}
@@ -1163,6 +1130,7 @@ declare_lint! {
Deny,
"type parameter default erroneously allowed in invalid location",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #36887 <https://github.com/rust-lang/rust/issues/36887>",
};
}
@@ -1305,6 +1273,7 @@ declare_lint! {
Deny,
"patterns in functions without body were erroneously allowed",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #35203 <https://github.com/rust-lang/rust/issues/35203>",
};
}
@@ -1348,6 +1317,7 @@ declare_lint! {
Deny,
"detects missing fragment specifiers in unused `macro_rules!` patterns",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #40107 <https://github.com/rust-lang/rust/issues/40107>",
};
}
@@ -1389,6 +1359,7 @@ declare_lint! {
Warn,
"detects generic lifetime arguments in path segments with late bound lifetime parameters",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #42868 <https://github.com/rust-lang/rust/issues/42868>",
};
}
@@ -1424,8 +1395,8 @@ declare_lint! {
Deny,
"trait-object types were treated as different depending on marker-trait order",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #56484 <https://github.com/rust-lang/rust/issues/56484>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -1464,6 +1435,7 @@ declare_lint! {
Warn,
"distinct impls distinguished only by the leak-check code",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #56105 <https://github.com/rust-lang/rust/issues/56105>",
};
}
@@ -1655,8 +1627,8 @@ declare_lint! {
Warn,
"raw pointer to an inference variable",
@future_incompatible = FutureIncompatibleInfo {
- reference: "issue #46906 <https://github.com/rust-lang/rust/issues/46906>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ reference: "issue #46906 <https://github.com/rust-lang/rust/issues/46906>",
};
}
@@ -1723,8 +1695,8 @@ declare_lint! {
Warn,
"suggest using `dyn Trait` for trait objects",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
};
}
@@ -1778,8 +1750,8 @@ declare_lint! {
"fully qualified paths that start with a module name \
instead of `crate`, `self`, or an extern crate name",
@future_incompatible = FutureIncompatibleInfo {
- reference: "issue #53130 <https://github.com/rust-lang/rust/issues/53130>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ reference: "issue #53130 <https://github.com/rust-lang/rust/issues/53130>",
};
}
@@ -1827,6 +1799,7 @@ declare_lint! {
Warn,
"floating-point literals cannot be used in patterns",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #41620 <https://github.com/rust-lang/rust/issues/41620>",
};
}
@@ -1977,6 +1950,7 @@ declare_lint! {
Warn,
"checks the object safety of where clauses",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #51443 <https://github.com/rust-lang/rust/issues/51443>",
};
}
@@ -2043,8 +2017,8 @@ declare_lint! {
Deny,
"detects proc macro derives using inaccessible names from parent modules",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #83583 <https://github.com/rust-lang/rust/issues/83583>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -2146,6 +2120,7 @@ declare_lint! {
"macro-expanded `macro_export` macros from the current crate \
cannot be referred to by absolute paths",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #52234 <https://github.com/rust-lang/rust/issues/52234>",
};
crate_level_only
@@ -2237,6 +2212,7 @@ declare_lint! {
Warn,
"constant used in pattern contains value of non-structural-match type in a field or a variant",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #62411 <https://github.com/rust-lang/rust/issues/62411>",
};
}
@@ -2291,6 +2267,7 @@ declare_lint! {
Allow,
"pointers are not structural-match",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #62411 <https://github.com/rust-lang/rust/issues/70861>",
};
}
@@ -2329,11 +2306,63 @@ declare_lint! {
"constant used in pattern of non-structural-match type and the constant's initializer \
expression contains values of non-structural-match types",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #73448 <https://github.com/rust-lang/rust/issues/73448>",
};
}
declare_lint! {
+ /// The `const_patterns_without_partial_eq` lint detects constants that are used in patterns
+ /// whose type does not implement `PartialEq`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(const_patterns_without_partial_eq)]
+ ///
+ /// trait EnumSetType {
+ /// type Repr;
+ /// }
+ ///
+ /// enum Enum8 { }
+ /// impl EnumSetType for Enum8 {
+ /// type Repr = u8;
+ /// }
+ ///
+ /// #[derive(PartialEq, Eq)]
+ /// struct EnumSet<T: EnumSetType> {
+ /// __enumset_underlying: T::Repr,
+ /// }
+ ///
+ /// const CONST_SET: EnumSet<Enum8> = EnumSet { __enumset_underlying: 3 };
+ ///
+ /// fn main() {
+ /// match CONST_SET {
+ /// CONST_SET => { /* ok */ }
+ /// _ => panic!("match fell through?"),
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of Rust accepted constants in patterns, even if those constants' types
+ /// did not have `PartialEq` implemented. The compiler falls back to comparing the value
+ /// field-by-field. In the future we'd like to ensure that pattern matching always
+ /// follows `PartialEq` semantics, so that trait bound will become a requirement for
+ /// matching on constants.
+ pub CONST_PATTERNS_WITHOUT_PARTIAL_EQ,
+ Warn,
+ "constant in pattern does not implement `PartialEq`",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
+ reference: "issue #116122 <https://github.com/rust-lang/rust/issues/116122>",
+ };
+}
+
+declare_lint! {
/// The `ambiguous_associated_items` lint detects ambiguity between
/// [associated items] and [enum variants].
///
@@ -2386,6 +2415,7 @@ declare_lint! {
Deny,
"ambiguous associated items",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #57644 <https://github.com/rust-lang/rust/issues/57644>",
};
}
@@ -2427,6 +2457,7 @@ declare_lint! {
Deny,
"a feature gate that doesn't break dependent crates",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #64266 <https://github.com/rust-lang/rust/issues/64266>",
};
}
@@ -2592,8 +2623,8 @@ declare_lint! {
///
/// The fix to this is to wrap the unsafe code in an `unsafe` block.
///
- /// This lint is "allow" by default since this will affect a large amount
- /// of existing code, and the exact plan for increasing the severity is
+ /// This lint is "allow" by default on editions up to 2021, from 2024 it is
+ /// "warn" by default; the plan for increasing severity further is
/// still being considered. See [RFC #2585] and [issue #71668] for more
/// details.
///
@@ -2605,6 +2636,7 @@ declare_lint! {
pub UNSAFE_OP_IN_UNSAFE_FN,
Allow,
"unsafe operations in unsafe functions without an explicit unsafe block are deprecated",
+ @edition Edition2024 => Warn;
}
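Illustrative only: with the new `@edition Edition2024 => Warn` default, code like the following warns out of the box on the 2024 edition while staying allow-by-default on earlier editions:

    unsafe fn read(p: *const i32) -> i32 {
        // The raw-pointer dereference now needs its own `unsafe` block even
        // inside an `unsafe fn`; writing `unsafe { *p }` silences the lint.
        *p
    }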
declare_lint! {
@@ -2654,8 +2686,8 @@ declare_lint! {
Deny,
"a C-like enum implementing Drop is cast",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #73333 <https://github.com/rust-lang/rust/issues/73333>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -2784,6 +2816,7 @@ declare_lint! {
Warn,
"detects a generic constant is used in a type without a emitting a warning",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #76200 <https://github.com/rust-lang/rust/issues/76200>",
};
}
@@ -2842,6 +2875,7 @@ declare_lint! {
Warn,
"uninhabited static",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #74840 <https://github.com/rust-lang/rust/issues/74840>",
};
}
@@ -3012,8 +3046,8 @@ declare_lint! {
Warn,
"trailing semicolon in macro body used as expression",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #79813 <https://github.com/rust-lang/rust/issues/79813>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -3059,6 +3093,7 @@ declare_lint! {
Warn,
"detects derive helper attributes that are used before they are introduced",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #79202 <https://github.com/rust-lang/rust/issues/79202>",
};
}
@@ -3127,6 +3162,7 @@ declare_lint! {
Deny,
"detects usage of `#![cfg_attr(..., crate_type/crate_name = \"...\")]`",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #91632 <https://github.com/rust-lang/rust/issues/91632>",
};
}
@@ -3218,6 +3254,7 @@ declare_lint! {
Warn,
"transparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #78586 <https://github.com/rust-lang/rust/issues/78586>",
};
}
@@ -3268,6 +3305,7 @@ declare_lint! {
Warn,
"unstable syntax can change at any point in the future, causing a hard error!",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #65860 <https://github.com/rust-lang/rust/issues/65860>",
};
}
@@ -3370,12 +3408,14 @@ declare_lint_pass! {
CONFLICTING_REPR_HINTS,
CONST_EVALUATABLE_UNCHECKED,
CONST_ITEM_MUTATION,
+ CONST_PATTERNS_WITHOUT_PARTIAL_EQ,
DEAD_CODE,
DEPRECATED,
DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
DEPRECATED_IN_FUTURE,
DEPRECATED_WHERE_CLAUSE_LOCATION,
DUPLICATE_MACRO_ATTRIBUTES,
+ ELIDED_LIFETIMES_IN_ASSOCIATED_CONSTANT,
ELIDED_LIFETIMES_IN_PATHS,
EXPORTED_PRIVATE_DEPENDENCIES,
FFI_UNWIND_CALLS,
@@ -3414,11 +3454,11 @@ declare_lint_pass! {
PATTERNS_IN_FNS_WITHOUT_BODY,
POINTER_STRUCTURAL_MATCH,
PRIVATE_BOUNDS,
- PRIVATE_IN_PUBLIC,
PRIVATE_INTERFACES,
PROC_MACRO_BACK_COMPAT,
PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
PUB_USE_OF_PRIVATE_EXTERN_CRATE,
+ REFINING_IMPL_TRAIT,
RENAMED_AND_REMOVED_LINTS,
REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
@@ -3441,8 +3481,8 @@ declare_lint_pass! {
UNFULFILLED_LINT_EXPECTATIONS,
UNINHABITED_STATIC,
UNKNOWN_CRATE_TYPES,
- UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
UNKNOWN_LINTS,
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
UNNAMEABLE_TEST_ITEMS,
UNNAMEABLE_TYPES,
UNREACHABLE_CODE,
@@ -3698,6 +3738,7 @@ declare_lint! {
Warn,
"detects invalid `#[doc(...)]` attributes",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #82730 <https://github.com/rust-lang/rust/issues/82730>",
};
}
@@ -3744,8 +3785,8 @@ declare_lint! {
Deny,
"detects usage of old versions of certain proc-macro crates",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #83125 <https://github.com/rust-lang/rust/issues/83125>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -3783,8 +3824,8 @@ declare_lint! {
Allow,
"detects usage of old versions of or-patterns",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/or-patterns-macro-rules.html>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/or-patterns-macro-rules.html>",
};
}
@@ -3832,8 +3873,8 @@ declare_lint! {
"detects the usage of trait methods which are ambiguous with traits added to the \
prelude in future editions",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/prelude.html>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/prelude.html>",
};
}
@@ -3869,8 +3910,8 @@ declare_lint! {
Allow,
"identifiers that will be parsed as a prefix in Rust 2021",
@future_incompatible = FutureIncompatibleInfo {
- reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/reserving-syntax.html>",
reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/reserving-syntax.html>",
};
crate_level_only
}
@@ -3917,6 +3958,7 @@ declare_lint! {
Warn,
"use of unsupported calling convention",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #87678 <https://github.com/rust-lang/rust/issues/87678>",
};
}
@@ -4257,8 +4299,8 @@ declare_lint! {
Deny,
"impl method assumes more implied bounds than its corresponding trait method",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #105572 <https://github.com/rust-lang/rust/issues/105572>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
}
@@ -4289,8 +4331,8 @@ declare_lint! {
Warn,
"`[u8]` or `str` used in a packed struct with `derive`",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
reference: "issue #107457 <https://github.com/rust-lang/rust/issues/107457>",
- reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
};
report_in_external_macro
}
@@ -4333,9 +4375,7 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
- /// # #![feature(type_privacy_lints)]
/// # #![allow(unused)]
- /// # #![allow(private_in_public)]
/// #![deny(private_interfaces)]
/// struct SemiPriv;
///
@@ -4356,9 +4396,8 @@ declare_lint! {
/// Having something private in the primary interface guarantees that
/// the item will be unusable from outer modules due to type privacy.
pub PRIVATE_INTERFACES,
- Allow,
+ Warn,
"private type in primary interface of an item",
- @feature_gate = sym::type_privacy_lints;
}
declare_lint! {
@@ -4369,8 +4408,6 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
- /// # #![feature(type_privacy_lints)]
- /// # #![allow(private_in_public)]
/// # #![allow(unused)]
/// #![deny(private_bounds)]
///
@@ -4388,9 +4425,8 @@ declare_lint! {
/// Having private types or traits in item bounds makes it less clear what interface
/// the item actually provides.
pub PRIVATE_BOUNDS,
- Allow,
+ Warn,
"private type in secondary interface of an item",
- @feature_gate = sym::type_privacy_lints;
}
declare_lint! {
@@ -4407,7 +4443,7 @@ declare_lint! {
/// pub struct S;
/// }
///
- /// pub fn get_voldemort() -> m::S { m::S }
+ /// pub fn get_unnameable() -> m::S { m::S }
/// # fn main() {}
/// ```
///
@@ -4457,12 +4493,14 @@ declare_lint! {
"impls that are not considered to overlap may be considered to \
overlap in the future",
@future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #114040 <https://github.com/rust-lang/rust/issues/114040>",
};
}
declare_lint! {
- /// The `unknown_diagnostic_attributes` lint detects unrecognized diagnostic attributes.
+ /// The `unknown_or_malformed_diagnostic_attributes` lint detects unrecognized or otherwise malformed
+ /// diagnostic attributes.
///
/// ### Example
///
@@ -4474,15 +4512,17 @@ declare_lint! {
///
/// {{produces}}
///
/// ### Explanation
///
/// It is usually a mistake to specify a diagnostic attribute that does not exist. Check
/// the spelling, and check the diagnostic attribute listing for the correct name. Also
/// consider if you are using an old version of the compiler, and the attribute
/// is only available in a newer version.
- pub UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+ pub UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
Warn,
- "unrecognized diagnostic attribute"
+ "unrecognized or malformed diagnostic attribute",
+ @feature_gate = sym::diagnostic_namespace;
}
declare_lint! {
@@ -4492,7 +4532,6 @@ declare_lint! {
/// ### Example
///
/// ```rust,compile_fail
- ///
/// #![deny(ambiguous_glob_imports)]
/// pub fn foo() -> u32 {
/// use sub::*;
@@ -4523,7 +4562,92 @@ declare_lint! {
Warn,
"detects certain glob imports that require reporting an ambiguity error",
@future_incompatible = FutureIncompatibleInfo {
- reason: FutureIncompatibilityReason::FutureReleaseError,
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
reference: "issue #114095 <https://github.com/rust-lang/rust/issues/114095>",
};
}
+
+declare_lint! {
+ /// The `refining_impl_trait` lint detects usages of return-position impl
+ /// traits in trait signatures which are refined by implementations.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![feature(return_position_impl_trait_in_trait)]
+ /// #![deny(refining_impl_trait)]
+ ///
+ /// use std::fmt::Display;
+ ///
+ /// pub trait AsDisplay {
+ /// fn as_display(&self) -> impl Display;
+ /// }
+ ///
+ /// impl<'s> AsDisplay for &'s str {
+ /// fn as_display(&self) -> Self {
+ /// *self
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// // users can observe that the return type of
+ /// // `<&str as AsDisplay>::as_display()` is `&str`.
+ /// let x: &str = "".as_display();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Return-position impl trait in traits (RPITITs) desugar to associated types,
+ /// and callers of methods for types where the implementation is known are
+ /// able to observe the types written in the impl signature. This may be
+ /// intended behavior, but may also pose a semver hazard for authors of libraries
+ /// who do not wish to make stronger guarantees about the types than what is
+ /// written in the trait signature.
+ pub REFINING_IMPL_TRAIT,
+ Warn,
+ "impl trait in impl method signature does not match trait method signature",
+}
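+
+ For comparison, a hedged sketch of the non-refining spelling that silences this lint, reusing the `AsDisplay` trait and feature gate from the example above: writing the impl's return type as the same `impl Display` keeps the concrete `&str` hidden from callers.
+
+     impl<'s> AsDisplay for &'s str {
+         // Matching the trait signature instead of naming a concrete type avoids refinement.
+         fn as_display(&self) -> impl std::fmt::Display {
+             *self
+         }
+     }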
+
+declare_lint! {
+ /// The `elided_lifetimes_in_associated_constant` lint detects elided lifetimes
+ /// that were erroneously allowed in associated constants.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(elided_lifetimes_in_associated_constant)]
+ ///
+ /// struct Foo;
+ ///
+ /// impl Foo {
+ /// const STR: &str = "hello, world";
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Implicit static-in-const behavior was decided [against] for associated
+ /// constants because of ambiguity. This, however, regressed and the compiler
+ /// erroneously treats elided lifetimes in associated constants as lifetime
+ /// parameters on the impl.
+ ///
+ /// This is a [future-incompatible] lint to transition this to a
+ /// hard error in the future.
+ ///
+ /// [against]: https://github.com/rust-lang/rust/issues/38831
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub ELIDED_LIFETIMES_IN_ASSOCIATED_CONSTANT,
+ Warn,
+ "elided lifetimes cannot be used in associated constants in impls",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
+ reference: "issue #115010 <https://github.com/rust-lang/rust/issues/115010>",
+ };
+}
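+
+ The expected fix, following the explanation above, is to spell out the lifetime; a minimal sketch:
+
+     struct Foo;
+
+     impl Foo {
+         // An explicit `'static` avoids the elided lifetime the lint warns about.
+         const STR: &'static str = "hello, world";
+     }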
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
index f350957f7..7ba589c3b 100644
--- a/compiler/rustc_lint_defs/src/lib.rs
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -23,8 +23,9 @@ pub mod builtin;
#[macro_export]
macro_rules! pluralize {
+ // Pluralize based on count (e.g., apples)
($x:expr) => {
- if $x != 1 { "s" } else { "" }
+ if $x == 1 { "" } else { "s" }
};
("has", $x:expr) => {
if $x == 1 { "has" } else { "have" }
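
For context, a hedged sketch of a typical call site for `pluralize!` (names are illustrative and the macro is assumed to be in scope):

    let count = 3;
    // With the corrected arm above, `pluralize!(count)` yields "" only when count == 1.
    let msg = format!("{count} warning{}", pluralize!(count));
    assert_eq!(msg, "3 warnings");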
@@ -346,12 +347,18 @@ pub struct FutureIncompatibleInfo {
/// The reason for future incompatibility
#[derive(Copy, Clone, Debug)]
pub enum FutureIncompatibilityReason {
- /// This will be an error in a future release
- /// for all editions
- FutureReleaseError,
+ /// This will be an error in a future release for all editions
+ ///
+ /// This will *not* show up in cargo's future breakage report.
+ /// The warning will hence only be seen in local crates, not in dependencies.
+ FutureReleaseErrorDontReportInDeps,
/// This will be an error in a future release, and
/// Cargo should create a report even for dependencies
- FutureReleaseErrorReportNow,
+ ///
+ /// This is the *only* reason that will make future incompatibility warnings show up in cargo's
+ /// reports. All other future incompatibility warnings are not visible when they occur in a
+ /// dependency.
+ FutureReleaseErrorReportInDeps,
/// Code that changes meaning in some way in a
/// future release.
FutureReleaseSemanticsChange,
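
A hedged sketch of how the reason is chosen when declaring a lint, mirroring the declarations earlier in this diff (the lint name and reference are placeholders, not part of this patch):

    declare_lint! {
        /// Illustrative only.
        pub EXAMPLE_FUTURE_BREAKAGE,
        Warn,
        "illustrative future-incompatible lint",
        @future_incompatible = FutureIncompatibleInfo {
            // Only this reason surfaces the warning in cargo's report for dependencies.
            reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps,
            // Placeholder reference; real lints point at a tracking issue.
            reference: "issue #NNNNN <https://github.com/rust-lang/rust/issues/NNNNN>",
        };
    }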
@@ -379,7 +386,7 @@ impl FutureIncompatibleInfo {
pub const fn default_fields_for_macro() -> Self {
FutureIncompatibleInfo {
reference: "",
- reason: FutureIncompatibilityReason::FutureReleaseError,
+ reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps,
explain_reason: true,
}
}
@@ -572,6 +579,10 @@ pub enum BuiltinLintDiagnostics {
/// The span of the unnecessarily-qualified path to remove.
removal_span: Span,
},
+ AssociatedConstElidedLifetime {
+ elided: bool,
+ span: Span,
+ },
}
/// Lints that are buffered up early on in the `Session` before the
@@ -713,37 +724,29 @@ macro_rules! declare_lint {
);
($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
$(@feature_gate = $gate:expr;)?
- $(@future_incompatible = FutureIncompatibleInfo { $($field:ident : $val:expr),* $(,)* }; )?
+ $(@future_incompatible = FutureIncompatibleInfo {
+ reason: $reason:expr,
+ $($field:ident : $val:expr),* $(,)*
+ }; )?
+ $(@edition $lint_edition:ident => $edition_level:ident;)?
$($v:ident),*) => (
$(#[$attr])*
$vis static $NAME: &$crate::Lint = &$crate::Lint {
name: stringify!($NAME),
default_level: $crate::$Level,
desc: $desc,
- edition_lint_opts: None,
is_plugin: false,
$($v: true,)*
- $(feature_gate: Some($gate),)*
+ $(feature_gate: Some($gate),)?
$(future_incompatible: Some($crate::FutureIncompatibleInfo {
+ reason: $reason,
$($field: $val,)*
..$crate::FutureIncompatibleInfo::default_fields_for_macro()
- }),)*
+ }),)?
+ $(edition_lint_opts: Some(($crate::Edition::$lint_edition, $crate::$edition_level)),)?
..$crate::Lint::default_fields_for_macro()
};
);
- ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
- $lint_edition: expr => $edition_level: ident
- ) => (
- $(#[$attr])*
- $vis static $NAME: &$crate::Lint = &$crate::Lint {
- name: stringify!($NAME),
- default_level: $crate::$Level,
- desc: $desc,
- edition_lint_opts: Some(($lint_edition, $crate::Level::$edition_level)),
- report_in_external_macro: false,
- is_plugin: false,
- };
- );
}
#[macro_export]
@@ -782,16 +785,7 @@ macro_rules! declare_tool_lint {
);
}
-/// Declares a static `LintArray` and return it as an expression.
-#[macro_export]
-macro_rules! lint_array {
- ($( $lint:expr ),* ,) => { lint_array!( $($lint),* ) };
- ($( $lint:expr ),*) => {{
- vec![$($lint),*]
- }}
-}
-
-pub type LintArray = Vec<&'static Lint>;
+pub type LintVec = Vec<&'static Lint>;
pub trait LintPass {
fn name(&self) -> &'static str;
@@ -805,7 +799,7 @@ macro_rules! impl_lint_pass {
fn name(&self) -> &'static str { stringify!($ty) }
}
impl $ty {
- pub fn get_lints() -> $crate::LintArray { $crate::lint_array!($($lint),*) }
+ pub fn get_lints() -> $crate::LintVec { vec![$($lint),*] }
}
};
}
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
index 4302b1618..f606fa483 100644
--- a/compiler/rustc_llvm/build.rs
+++ b/compiler/rustc_llvm/build.rs
@@ -252,7 +252,10 @@ fn main() {
} else if target.contains("windows-gnu") {
println!("cargo:rustc-link-lib=shell32");
println!("cargo:rustc-link-lib=uuid");
- } else if target.contains("haiku") || target.contains("darwin") {
+ } else if target.contains("haiku")
+ || target.contains("darwin")
+ || (is_crossed && (target.contains("dragonfly") || target.contains("solaris")))
+ {
println!("cargo:rustc-link-lib=z");
} else if target.contains("netbsd") {
println!("cargo:rustc-link-lib=z");
diff --git a/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
index 35d6b9ed7..54fdc84c7 100644
--- a/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
@@ -203,7 +203,12 @@ LLVMRustWriteArchive(char *Dst, size_t NumMembers,
}
}
+#if LLVM_VERSION_LT(18, 0)
auto Result = writeArchive(Dst, Members, WriteSymbtab, Kind, true, false);
+#else
+ auto SymtabMode = WriteSymbtab ? SymtabWritingMode::NormalSymtab : SymtabWritingMode::NoSymtab;
+ auto Result = writeArchive(Dst, Members, SymtabMode, Kind, true, false);
+#endif
if (!Result)
return LLVMRustResult::Success;
LLVMRustSetLastError(toString(std::move(Result)).c_str());
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index b566ea496..b729c4022 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1,5 +1,6 @@
#include <stdio.h>
+#include <cstddef>
#include <iomanip>
#include <vector>
#include <set>
@@ -9,6 +10,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
@@ -50,6 +52,8 @@
using namespace llvm;
+static codegen::RegisterCodeGenFlags CGF;
+
typedef struct LLVMOpaquePass *LLVMPassRef;
typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
@@ -235,16 +239,22 @@ enum class LLVMRustCodeGenOptLevel {
Aggressive,
};
-static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
+#if LLVM_VERSION_GE(18, 0)
+ using CodeGenOptLevelEnum = llvm::CodeGenOptLevel;
+#else
+ using CodeGenOptLevelEnum = llvm::CodeGenOpt::Level;
+#endif
+
+static CodeGenOptLevelEnum fromRust(LLVMRustCodeGenOptLevel Level) {
switch (Level) {
case LLVMRustCodeGenOptLevel::None:
- return CodeGenOpt::None;
+ return CodeGenOptLevelEnum::None;
case LLVMRustCodeGenOptLevel::Less:
- return CodeGenOpt::Less;
+ return CodeGenOptLevelEnum::Less;
case LLVMRustCodeGenOptLevel::Default:
- return CodeGenOpt::Default;
+ return CodeGenOptLevelEnum::Default;
case LLVMRustCodeGenOptLevel::Aggressive:
- return CodeGenOpt::Aggressive;
+ return CodeGenOptLevelEnum::Aggressive;
default:
report_fatal_error("Bad CodeGenOptLevel.");
}
@@ -321,13 +331,13 @@ extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM,
PrintBackendInfo Print,
void* Out) {
const TargetMachine *Target = unwrap(TM);
- const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const Triple::ArchType HostArch = Triple(sys::getDefaultTargetTriple()).getArch();
const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
std::ostringstream Buf;
#if LLVM_VERSION_GE(17, 0)
+ const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getAllProcessorDescriptions();
#else
Buf << "Full target CPU help is not supported by this LLVM version.\n\n";
@@ -406,7 +416,10 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
bool RelaxELFRelocations,
bool UseInitArray,
const char *SplitDwarfFile,
- bool ForceEmulatedTls) {
+ const char *OutputObjFile,
+ const char *DebugInfoCompression,
+ bool ForceEmulatedTls,
+ const char *ArgsCstrBuff, size_t ArgsCstrBuffLen) {
auto OptLevel = fromRust(RustOptLevel);
auto RM = fromRust(RustReloc);
@@ -421,7 +434,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
return nullptr;
}
- TargetOptions Options;
+ TargetOptions Options = codegen::InitTargetOptionsFromCodeGenFlags(Trip);
Options.FloatABIType = FloatABI::Default;
if (UseSoftFloat) {
@@ -436,6 +449,19 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
if (SplitDwarfFile) {
Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
}
+ if (OutputObjFile) {
+ Options.ObjectFilenameForDebug = OutputObjFile;
+ }
+#if LLVM_VERSION_GE(16, 0)
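+  // If the requested compression kind is unavailable in this LLVM build, none of the
+  // branches below match and CompressDebugSections is left unchanged.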
+ if (!strcmp("zlib", DebugInfoCompression) && llvm::compression::zlib::isAvailable()) {
+ Options.CompressDebugSections = DebugCompressionType::Zlib;
+ } else if (!strcmp("zstd", DebugInfoCompression) && llvm::compression::zstd::isAvailable()) {
+ Options.CompressDebugSections = DebugCompressionType::Zstd;
+ } else if (!strcmp("none", DebugInfoCompression)) {
+ Options.CompressDebugSections = DebugCompressionType::None;
+ }
+#endif
+
Options.RelaxELFRelocations = RelaxELFRelocations;
Options.UseInitArray = UseInitArray;
@@ -462,12 +488,48 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
Options.EmitStackSizeSection = EmitStackSizeSection;
+
+ if (ArgsCstrBuff != nullptr)
+ {
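+    // Buffer layout assumed from the parsing below: argv0 first, then each remaining
+    // command-line string, all NUL-terminated; the final byte is the terminating NUL.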
+ int buffer_offset = 0;
+ assert(ArgsCstrBuff[ArgsCstrBuffLen - 1] == '\0');
+
+ const size_t arg0_len = std::strlen(ArgsCstrBuff);
+ char* arg0 = new char[arg0_len + 1];
+ memcpy(arg0, ArgsCstrBuff, arg0_len);
+ arg0[arg0_len] = '\0';
+ buffer_offset += arg0_len + 1;
+
+ const int num_cmd_arg_strings =
+ std::count(&ArgsCstrBuff[buffer_offset], &ArgsCstrBuff[ArgsCstrBuffLen], '\0');
+
+ std::string* cmd_arg_strings = new std::string[num_cmd_arg_strings];
+ for (int i = 0; i < num_cmd_arg_strings; ++i)
+ {
+ assert(buffer_offset < ArgsCstrBuffLen);
+ const int len = std::strlen(ArgsCstrBuff + buffer_offset);
+ cmd_arg_strings[i] = std::string(&ArgsCstrBuff[buffer_offset], len);
+ buffer_offset += len + 1;
+ }
+
+ assert(buffer_offset == ArgsCstrBuffLen);
+
+ Options.MCOptions.Argv0 = arg0;
+ Options.MCOptions.CommandLineArgs =
+ llvm::ArrayRef<std::string>(cmd_arg_strings, num_cmd_arg_strings);
+ }
+
TargetMachine *TM = TheTarget->createTargetMachine(
Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
return wrap(TM);
}
extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
+
+ MCTargetOptions& MCOptions = unwrap(TM)->Options.MCOptions;
+ delete[] MCOptions.Argv0;
+ delete[] MCOptions.CommandLineArgs.data();
+
delete unwrap(TM);
}
@@ -502,9 +564,17 @@ enum class LLVMRustFileType {
static CodeGenFileType fromRust(LLVMRustFileType Type) {
switch (Type) {
case LLVMRustFileType::AssemblyFile:
+#if LLVM_VERSION_GE(18, 0)
+ return CodeGenFileType::AssemblyFile;
+#else
return CGFT_AssemblyFile;
+#endif
case LLVMRustFileType::ObjectFile:
+#if LLVM_VERSION_GE(18, 0)
+ return CodeGenFileType::ObjectFile;
+#else
return CGFT_ObjectFile;
+#endif
default:
report_fatal_error("Bad FileType.");
}
@@ -1058,6 +1128,13 @@ extern "C" void LLVMRustPrintPasses() {
extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
size_t Len) {
auto PreserveFunctions = [=](const GlobalValue &GV) {
+ // Preserve LLVM-injected, ASAN-related symbols.
+ // See also https://github.com/rust-lang/rust/issues/113404.
+ if (GV.getName() == "___asan_globals_registered") {
+ return true;
+ }
+
+ // Preserve symbols exported from Rust modules.
for (size_t I = 0; I < Len; I++) {
if (GV.getName() == Symbols[I]) {
return true;
@@ -1202,7 +1279,11 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
Ret->ModuleMap[module->identifier] = mem_buffer;
+#if LLVM_VERSION_GE(18, 0)
+ if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index)) {
+#else
if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
+#endif
LLVMRustSetLastError(toString(std::move(Err)).c_str());
return nullptr;
}
@@ -1507,6 +1588,38 @@ LLVMRustGetBitcodeSliceFromObjectData(const char *data,
return BitcodeOrError->getBufferStart();
}
+// Find a section of an object file by name. Fail if the section is missing or
+// empty.
+extern "C" const char *LLVMRustGetSliceFromObjectDataByName(const char *data,
+ size_t len,
+ const char *name,
+ size_t *out_len) {
+ *out_len = 0;
+ StringRef Data(data, len);
+ MemoryBufferRef Buffer(Data, ""); // The id is unused.
+ file_magic Type = identify_magic(Buffer.getBuffer());
+ Expected<std::unique_ptr<object::ObjectFile>> ObjFileOrError =
+ object::ObjectFile::createObjectFile(Buffer, Type);
+ if (!ObjFileOrError) {
+ LLVMRustSetLastError(toString(ObjFileOrError.takeError()).c_str());
+ return nullptr;
+ }
+ for (const object::SectionRef &Sec : (*ObjFileOrError)->sections()) {
+ Expected<StringRef> Name = Sec.getName();
+ if (Name && *Name == name) {
+ Expected<StringRef> SectionOrError = Sec.getContents();
+ if (!SectionOrError) {
+ LLVMRustSetLastError(toString(SectionOrError.takeError()).c_str());
+ return nullptr;
+ }
+ *out_len = SectionOrError->size();
+ return SectionOrError->data();
+ }
+ }
+ LLVMRustSetLastError("could not find requested section");
+ return nullptr;
+}
+
// Computes the LTO cache key for the provided 'ModId' in the given 'Data',
// storing the result in 'KeyOut'.
// Currently, this cache key is a SHA-1 hash of anything that could affect
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 70cdf3d6d..4390486b0 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -2044,3 +2044,19 @@ extern "C" bool LLVMRustIsNonGVFunctionPointerTy(LLVMValueRef V) {
}
return false;
}
+
+extern "C" bool LLVMRustLLVMHasZlibCompressionForDebugSymbols() {
+#if LLVM_VERSION_GE(16, 0)
+ return llvm::compression::zlib::isAvailable();
+#else
+ return false;
+#endif
+}
+
+extern "C" bool LLVMRustLLVMHasZstdCompressionForDebugSymbols() {
+#if LLVM_VERSION_GE(16, 0)
+ return llvm::compression::zstd::isAvailable();
+#else
+ return false;
+#endif
+}
diff --git a/compiler/rustc_macros/src/lib.rs b/compiler/rustc_macros/src/lib.rs
index f4593d0fe..85829906f 100644
--- a/compiler/rustc_macros/src/lib.rs
+++ b/compiler/rustc_macros/src/lib.rs
@@ -7,7 +7,7 @@
#![allow(rustc::default_hash_types)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#![recursion_limit = "128"]
use synstructure::decl_derive;
diff --git a/compiler/rustc_metadata/messages.ftl b/compiler/rustc_metadata/messages.ftl
index cc58d51be..633004fdd 100644
--- a/compiler/rustc_metadata/messages.ftl
+++ b/compiler/rustc_metadata/messages.ftl
@@ -259,9 +259,6 @@ metadata_std_required =
metadata_symbol_conflicts_current =
the current crate is indistinguishable from one of its dependencies: it has the same crate-name `{$crate_name}` and was compiled with the same `-C metadata` arguments. This will result in symbol conflicts between the two.
-metadata_symbol_conflicts_others =
- found two different crates with name `{$crate_name}` that are not distinguished by differing `-C metadata`. This will result in symbol conflicts between the two.
-
metadata_target_no_std_support =
the `{$locator_triple}` target may not support the standard library
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
index fce80ab37..692214753 100644
--- a/compiler/rustc_metadata/src/creader.rs
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -8,7 +8,7 @@ use rustc_ast::expand::allocator::{alloc_error_handler_name, global_fn_name, All
use rustc_ast::{self as ast, *};
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::svh::Svh;
-use rustc_data_structures::sync::{MappedReadGuard, MappedWriteGuard, ReadGuard, WriteGuard};
+use rustc_data_structures::sync::{FreezeReadGuard, FreezeWriteGuard};
use rustc_expand::base::SyntaxExtension;
use rustc_hir::def_id::{CrateNum, LocalDefId, StableCrateId, StableCrateIdMap, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
@@ -134,14 +134,14 @@ impl<'a> std::fmt::Debug for CrateDump<'a> {
}
impl CStore {
- pub fn from_tcx(tcx: TyCtxt<'_>) -> MappedReadGuard<'_, CStore> {
- ReadGuard::map(tcx.untracked().cstore.read(), |cstore| {
+ pub fn from_tcx(tcx: TyCtxt<'_>) -> FreezeReadGuard<'_, CStore> {
+ FreezeReadGuard::map(tcx.untracked().cstore.read(), |cstore| {
cstore.as_any().downcast_ref::<CStore>().expect("`tcx.cstore` is not a `CStore`")
})
}
- pub fn from_tcx_mut(tcx: TyCtxt<'_>) -> MappedWriteGuard<'_, CStore> {
- WriteGuard::map(tcx.untracked().cstore.write(), |cstore| {
+ pub fn from_tcx_mut(tcx: TyCtxt<'_>) -> FreezeWriteGuard<'_, CStore> {
+ FreezeWriteGuard::map(tcx.untracked().cstore.write(), |cstore| {
cstore.untracked_as_any().downcast_mut().expect("`tcx.cstore` is not a `CStore`")
})
}
diff --git a/compiler/rustc_metadata/src/fs.rs b/compiler/rustc_metadata/src/fs.rs
index 2a9662b80..7eb2a347d 100644
--- a/compiler/rustc_metadata/src/fs.rs
+++ b/compiler/rustc_metadata/src/fs.rs
@@ -5,7 +5,6 @@ use crate::errors::{
use crate::{encode_metadata, EncodedMetadata};
use rustc_data_structures::temp_dir::MaybeTempDir;
-use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{OutFileName, OutputType};
use rustc_session::output::filename_for_metadata;
@@ -40,8 +39,7 @@ pub fn emit_wrapper_file(
}
pub fn encode_and_write_metadata(tcx: TyCtxt<'_>) -> (EncodedMetadata, bool) {
- let crate_name = tcx.crate_name(LOCAL_CRATE);
- let out_filename = filename_for_metadata(tcx.sess, crate_name, tcx.output_filenames(()));
+ let out_filename = filename_for_metadata(tcx.sess, tcx.output_filenames(()));
// To avoid races with another rustc process scanning the output directory,
// we need to write the file somewhere else and atomically move it to its
// final destination, with an `fs::rename` call. In order for the rename to
diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs
index 87373d997..fa77b36c4 100644
--- a/compiler/rustc_metadata/src/lib.rs
+++ b/compiler/rustc_metadata/src/lib.rs
@@ -26,7 +26,7 @@ extern crate rustc_middle;
#[macro_use]
extern crate tracing;
-pub use rmeta::{provide, provide_extern};
+pub use rmeta::provide;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
@@ -42,6 +42,6 @@ pub mod locator;
pub use fs::{emit_wrapper_file, METADATA_FILENAME};
pub use native_libs::find_native_static_library;
-pub use rmeta::{encode_metadata, EncodedMetadata, METADATA_HEADER};
+pub use rmeta::{encode_metadata, rendered_const, EncodedMetadata, METADATA_HEADER};
fluent_messages! { "../messages.ftl" }
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
index bf6004ba8..3062939d8 100644
--- a/compiler/rustc_metadata/src/locator.rs
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -903,10 +903,11 @@ pub fn list_file_metadata(
path: &Path,
metadata_loader: &dyn MetadataLoader,
out: &mut dyn Write,
+ ls_kinds: &[String],
) -> IoResult<()> {
let flavor = get_flavor_from_path(path);
match get_metadata_section(target, flavor, path, metadata_loader) {
- Ok(metadata) => metadata.list_crate_metadata(out),
+ Ok(metadata) => metadata.list_crate_metadata(out, ls_kinds),
Err(msg) => write!(out, "{msg}\n"),
}
}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
index e8f66c36a..b189e79df 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -9,7 +9,7 @@ use rustc_data_structures::captures::Captures;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::owned_slice::OwnedSlice;
use rustc_data_structures::svh::Svh;
-use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc, OnceCell};
+use rustc_data_structures::sync::{AppendOnlyVec, AtomicBool, Lock, Lrc, OnceLock};
use rustc_data_structures::unhash::UnhashMap;
use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro};
@@ -24,7 +24,6 @@ use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
use rustc_middle::ty::codec::TyDecoder;
use rustc_middle::ty::fast_reject::SimplifiedType;
-use rustc_middle::ty::GeneratorDiagnosticData;
use rustc_middle::ty::{self, ParameterizedOverTcx, Ty, TyCtxt, Visibility};
use rustc_serialize::opaque::MemDecoder;
use rustc_serialize::{Decodable, Decoder};
@@ -44,7 +43,6 @@ use std::sync::atomic::Ordering;
use std::{io, iter, mem};
pub(super) use cstore_impl::provide;
-pub use cstore_impl::provide_extern;
use rustc_span::hygiene::HygieneDecodeContext;
mod cstore_impl;
@@ -93,7 +91,7 @@ pub(crate) struct CrateMetadata {
/// For every definition in this crate, maps its `DefPathHash` to its `DefIndex`.
def_path_hash_map: DefPathHashMapRef<'static>,
/// Likewise for ExpnHash.
- expn_hash_map: OnceCell<UnhashMap<ExpnHash, ExpnIndex>>,
+ expn_hash_map: OnceLock<UnhashMap<ExpnHash, ExpnIndex>>,
/// Used for decoding interpret::AllocIds in a cached & thread-safe manner.
alloc_decoding_state: AllocDecodingState,
/// Caches decoded `DefKey`s.
@@ -250,6 +248,7 @@ impl<'a, 'tcx> Metadata<'a, 'tcx> for (CrateMetadataRef<'a>, TyCtxt<'tcx>) {
}
impl<T: ParameterizedOverTcx> LazyValue<T> {
+ #[inline]
fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(self, metadata: M) -> T::Value<'tcx>
where
T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>,
@@ -294,6 +293,7 @@ unsafe impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> TrustedLen
}
impl<T: ParameterizedOverTcx> LazyArray<T> {
+ #[inline]
fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(
self,
metadata: M,
@@ -360,8 +360,8 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
self.read_lazy_offset_then(|pos| LazyArray::from_position_and_num_elems(pos, len))
}
- fn read_lazy_table<I, T>(&mut self, len: usize) -> LazyTable<I, T> {
- self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, len))
+ fn read_lazy_table<I, T>(&mut self, width: usize, len: usize) -> LazyTable<I, T> {
+ self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, width, len))
}
#[inline]
@@ -420,6 +420,7 @@ impl<'a, 'tcx> TyDecoder for DecodeContext<'a, 'tcx> {
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
+ #[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum {
let cnum = CrateNum::from_u32(d.read_u32());
d.map_encoded_cnum_to_current(cnum)
@@ -427,18 +428,21 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex {
+ #[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex {
DefIndex::from_u32(d.read_u32())
}
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex {
+ #[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex {
ExpnIndex::from_u32(d.read_u32())
}
}
impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ast::AttrId {
+ #[inline]
fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ast::AttrId {
let sess = d.sess.expect("can't decode AttrId without Session");
sess.parse_sess.attr_id_generator.mk_attr_id()
@@ -672,6 +676,7 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyValue<T> {
}
impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> {
+ #[inline]
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
let len = decoder.read_usize();
if len == 0 { LazyArray::default() } else { decoder.read_lazy_array(len) }
@@ -680,8 +685,9 @@ impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> {
impl<'a, 'tcx, I: Idx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyTable<I, T> {
fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
+ let width = decoder.read_usize();
let len = decoder.read_usize();
- decoder.read_lazy_table(len)
+ decoder.read_lazy_table(width, len)
}
}
@@ -717,25 +723,196 @@ impl MetadataBlob {
LazyValue::<CrateRoot>::from_position(NonZeroUsize::new(pos).unwrap()).decode(self)
}
- pub(crate) fn list_crate_metadata(&self, out: &mut dyn io::Write) -> io::Result<()> {
+ pub(crate) fn list_crate_metadata(
+ &self,
+ out: &mut dyn io::Write,
+ ls_kinds: &[String],
+ ) -> io::Result<()> {
let root = self.get_root();
- writeln!(out, "Crate info:")?;
- writeln!(out, "name {}{}", root.name(), root.extra_filename)?;
- writeln!(out, "hash {} stable_crate_id {:?}", root.hash(), root.stable_crate_id)?;
- writeln!(out, "proc_macro {:?}", root.proc_macro_data.is_some())?;
- writeln!(out, "=External Dependencies=")?;
-
- for (i, dep) in root.crate_deps.decode(self).enumerate() {
- let CrateDep { name, extra_filename, hash, host_hash, kind, is_private } = dep;
- let number = i + 1;
-
- writeln!(
- out,
- "{number} {name}{extra_filename} hash {hash} host_hash {host_hash:?} kind {kind:?} {privacy}",
- privacy = if is_private { "private" } else { "public" }
- )?;
+
+ let all_ls_kinds = vec![
+ "root".to_owned(),
+ "lang_items".to_owned(),
+ "features".to_owned(),
+ "items".to_owned(),
+ ];
+ let ls_kinds = if ls_kinds.contains(&"all".to_owned()) { &all_ls_kinds } else { ls_kinds };
+
+ for kind in ls_kinds {
+ match &**kind {
+ "root" => {
+ writeln!(out, "Crate info:")?;
+ writeln!(out, "name {}{}", root.name(), root.extra_filename)?;
+ writeln!(
+ out,
+ "hash {} stable_crate_id {:?}",
+ root.hash(),
+ root.stable_crate_id
+ )?;
+ writeln!(out, "proc_macro {:?}", root.proc_macro_data.is_some())?;
+ writeln!(out, "triple {}", root.header.triple.triple())?;
+ writeln!(out, "edition {}", root.edition)?;
+ writeln!(out, "symbol_mangling_version {:?}", root.symbol_mangling_version)?;
+ writeln!(
+ out,
+ "required_panic_strategy {:?} panic_in_drop_strategy {:?}",
+ root.required_panic_strategy, root.panic_in_drop_strategy
+ )?;
+ writeln!(
+ out,
+ "has_global_allocator {} has_alloc_error_handler {} has_panic_handler {} has_default_lib_allocator {}",
+ root.has_global_allocator,
+ root.has_alloc_error_handler,
+ root.has_panic_handler,
+ root.has_default_lib_allocator
+ )?;
+ writeln!(
+ out,
+ "compiler_builtins {} needs_allocator {} needs_panic_runtime {} no_builtins {} panic_runtime {} profiler_runtime {}",
+ root.compiler_builtins,
+ root.needs_allocator,
+ root.needs_panic_runtime,
+ root.no_builtins,
+ root.panic_runtime,
+ root.profiler_runtime
+ )?;
+
+ writeln!(out, "=External Dependencies=")?;
+ let dylib_dependency_formats =
+ root.dylib_dependency_formats.decode(self).collect::<Vec<_>>();
+ for (i, dep) in root.crate_deps.decode(self).enumerate() {
+ let CrateDep { name, extra_filename, hash, host_hash, kind, is_private } =
+ dep;
+ let number = i + 1;
+
+ writeln!(
+ out,
+ "{number} {name}{extra_filename} hash {hash} host_hash {host_hash:?} kind {kind:?} {privacy}{linkage}",
+ privacy = if is_private { "private" } else { "public" },
+ linkage = if dylib_dependency_formats.is_empty() {
+ String::new()
+ } else {
+ format!(" linkage {:?}", dylib_dependency_formats[i])
+ }
+ )?;
+ }
+ write!(out, "\n")?;
+ }
+
+ "lang_items" => {
+ writeln!(out, "=Lang items=")?;
+ for (id, lang_item) in root.lang_items.decode(self) {
+ writeln!(
+ out,
+ "{} = crate{}",
+ lang_item.name(),
+ DefPath::make(LOCAL_CRATE, id, |parent| root
+ .tables
+ .def_keys
+ .get(self, parent)
+ .unwrap()
+ .decode(self))
+ .to_string_no_crate_verbose()
+ )?;
+ }
+ for lang_item in root.lang_items_missing.decode(self) {
+ writeln!(out, "{} = <missing>", lang_item.name())?;
+ }
+ write!(out, "\n")?;
+ }
+
+ "features" => {
+ writeln!(out, "=Lib features=")?;
+ for (feature, since) in root.lib_features.decode(self) {
+ writeln!(
+ out,
+ "{}{}",
+ feature,
+ if let Some(since) = since {
+ format!(" since {since}")
+ } else {
+ String::new()
+ }
+ )?;
+ }
+ write!(out, "\n")?;
+ }
+
+ "items" => {
+ writeln!(out, "=Items=")?;
+
+ fn print_item(
+ blob: &MetadataBlob,
+ out: &mut dyn io::Write,
+ item: DefIndex,
+ indent: usize,
+ ) -> io::Result<()> {
+ let root = blob.get_root();
+
+ let def_kind = root.tables.opt_def_kind.get(blob, item).unwrap();
+ let def_key = root.tables.def_keys.get(blob, item).unwrap().decode(blob);
+ let def_name = if item == CRATE_DEF_INDEX {
+ rustc_span::symbol::kw::Crate
+ } else {
+ def_key
+ .disambiguated_data
+ .data
+ .get_opt_name()
+ .unwrap_or_else(|| Symbol::intern("???"))
+ };
+ let visibility =
+ root.tables.visibility.get(blob, item).unwrap().decode(blob).map_id(
+ |index| {
+ format!(
+ "crate{}",
+ DefPath::make(LOCAL_CRATE, index, |parent| root
+ .tables
+ .def_keys
+ .get(blob, parent)
+ .unwrap()
+ .decode(blob))
+ .to_string_no_crate_verbose()
+ )
+ },
+ );
+ write!(
+ out,
+ "{nil: <indent$}{:?} {:?} {} {{",
+ visibility,
+ def_kind,
+ def_name,
+ nil = "",
+ )?;
+
+ if let Some(children) =
+ root.tables.module_children_non_reexports.get(blob, item)
+ {
+ write!(out, "\n")?;
+ for child in children.decode(blob) {
+ print_item(blob, out, child, indent + 4)?;
+ }
+ writeln!(out, "{nil: <indent$}}}", nil = "")?;
+ } else {
+ writeln!(out, "}}")?;
+ }
+
+ Ok(())
+ }
+
+ print_item(self, out, CRATE_DEF_INDEX, 0)?;
+
+ write!(out, "\n")?;
+ }
+
+ _ => {
+ writeln!(
+ out,
+ "unknown -Zls kind. allowed values are: all, root, lang_items, features, items"
+ )?;
+ }
+ }
}
- write!(out, "\n")?;
+
Ok(())
}
}
@@ -1493,11 +1670,12 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
// We can't reuse an existing SourceFile, so allocate a new one
// containing the information we need.
+ let original_end_pos = source_file_to_import.end_position();
let rustc_span::SourceFile {
mut name,
src_hash,
- start_pos,
- end_pos,
+ start_pos: original_start_pos,
+ source_len,
lines,
multibyte_chars,
non_narrow_chars,
@@ -1539,59 +1717,38 @@ impl<'a, 'tcx> CrateMetadataRef<'a> {
// on `try_to_translate_virtual_to_real`).
try_to_translate_virtual_to_real(&mut name);
- let source_length = (end_pos - start_pos).to_usize();
-
let local_version = sess.source_map().new_imported_source_file(
name,
src_hash,
name_hash,
- source_length,
+ source_len.to_u32(),
self.cnum,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
- start_pos,
source_file_index,
);
debug!(
"CrateMetaData::imported_source_files alloc \
- source_file {:?} original (start_pos {:?} end_pos {:?}) \
- translated (start_pos {:?} end_pos {:?})",
+ source_file {:?} original (start_pos {:?} source_len {:?}) \
+ translated (start_pos {:?} source_len {:?})",
local_version.name,
- start_pos,
- end_pos,
+ original_start_pos,
+ source_len,
local_version.start_pos,
- local_version.end_pos
+ local_version.source_len
);
ImportedSourceFile {
- original_start_pos: start_pos,
- original_end_pos: end_pos,
+ original_start_pos,
+ original_end_pos,
translated_source_file: local_version,
}
})
.clone()
}
- fn get_generator_diagnostic_data(
- self,
- tcx: TyCtxt<'tcx>,
- id: DefIndex,
- ) -> Option<GeneratorDiagnosticData<'tcx>> {
- self.root
- .tables
- .generator_diagnostic_data
- .get(self, id)
- .map(|param| param.decode((self, tcx)))
- .map(|generator_data| GeneratorDiagnosticData {
- generator_interior_types: generator_data.generator_interior_types,
- hir_owner: generator_data.hir_owner,
- nodes_types: generator_data.nodes_types,
- adjustments: generator_data.adjustments,
- })
- }
-
fn get_attr_flags(self, index: DefIndex) -> AttrFlags {
self.root.tables.attr_flags.get(self, index)
}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
index aeda8af6d..f27eee0d7 100644
--- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -14,10 +14,11 @@ use rustc_middle::arena::ArenaAllocatable;
use rustc_middle::metadata::ModChild;
use rustc_middle::middle::exported_symbols::ExportedSymbol;
use rustc_middle::middle::stability::DeprecationEntry;
+use rustc_middle::query::ExternProviders;
use rustc_middle::query::LocalCrate;
-use rustc_middle::query::{ExternProviders, Providers};
use rustc_middle::ty::fast_reject::SimplifiedType;
use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::util::Providers;
use rustc_session::cstore::CrateStore;
use rustc_session::{Session, StableCrateId};
use rustc_span::hygiene::{ExpnHash, ExpnId};
@@ -126,12 +127,12 @@ macro_rules! provide_one {
// External query providers call `crate_hash` in order to register a dependency
// on the crate metadata. The exception is `crate_hash` itself, which obviously
// doesn't need to do this (and can't, as it would cause a query cycle).
- use rustc_middle::dep_graph::DepKind;
- if DepKind::$name != DepKind::crate_hash && $tcx.dep_graph.is_fully_enabled() {
+ use rustc_middle::dep_graph::dep_kinds;
+ if dep_kinds::$name != dep_kinds::crate_hash && $tcx.dep_graph.is_fully_enabled() {
$tcx.ensure().crate_hash($def_id.krate);
}
- let cdata = rustc_data_structures::sync::MappedReadGuard::map(CStore::from_tcx($tcx), |c| {
+ let cdata = rustc_data_structures::sync::FreezeReadGuard::map(CStore::from_tcx($tcx), |c| {
c.get_crate_data($def_id.krate).cdata
});
let $cdata = crate::creader::CrateMetadataRef {
@@ -147,7 +148,7 @@ macro_rules! provide_one {
macro_rules! provide {
($tcx:ident, $def_id:ident, $other:ident, $cdata:ident,
$($name:ident => { $($compute:tt)* })*) => {
- pub fn provide_extern(providers: &mut ExternProviders) {
+ fn provide_extern(providers: &mut ExternProviders) {
$(provide_one! {
$tcx, $def_id, $other, $cdata, $name => { $($compute)* }
})*
@@ -209,6 +210,7 @@ provide! { tcx, def_id, other, cdata,
inferred_outlives_of => { table_defaulted_array }
super_predicates_of => { table }
type_of => { table }
+ type_alias_is_lazy => { cdata.root.tables.type_alias_is_lazy.get(cdata, def_id.index) }
variances_of => { table }
fn_sig => { table }
codegen_fn_attrs => { table }
@@ -373,7 +375,6 @@ provide! { tcx, def_id, other, cdata,
crate_extern_paths => { cdata.source().paths().cloned().collect() }
expn_that_defined => { cdata.get_expn_that_defined(def_id.index, tcx.sess) }
- generator_diagnostic_data => { cdata.get_generator_diagnostic_data(tcx, def_id.index) }
is_doc_hidden => { cdata.get_attr_flags(def_id.index).contains(AttrFlags::IS_DOC_HIDDEN) }
doc_link_resolutions => { tcx.arena.alloc(cdata.get_doc_link_resolutions(def_id.index)) }
doc_link_traits_in_scope => {
@@ -385,7 +386,7 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
// FIXME(#44234) - almost all of these queries have no sub-queries and
// therefore no actual inputs, they're just reading tables calculated in
// resolve! Does this work? Unsure! That's what the issue is about
- *providers = Providers {
+ providers.queries = rustc_middle::query::Providers {
allocator_kind: |tcx, ()| CStore::from_tcx(tcx).allocator_kind(),
alloc_error_handler_kind: |tcx, ()| CStore::from_tcx(tcx).alloc_error_handler_kind(),
is_private_dep: |_tcx, LocalCrate| false,
@@ -510,11 +511,12 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) {
crates: |tcx, ()| {
// The list of loaded crates is now frozen in query cache,
// so make sure cstore is not mutably accessed from here on.
- tcx.untracked().cstore.leak();
+ tcx.untracked().cstore.freeze();
tcx.arena.alloc_from_iter(CStore::from_tcx(tcx).iter_crate_data().map(|(cnum, _)| cnum))
},
- ..*providers
+ ..providers.queries
};
+ provide_extern(&mut providers.extern_queries);
}
impl CStore {
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
index be91ad408..a4ba94327 100644
--- a/compiler/rustc_metadata/src/rmeta/encoder.rs
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -14,11 +14,12 @@ use rustc_data_structures::temp_dir::MaybeTempDir;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{
- CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_ID, CRATE_DEF_INDEX, LOCAL_CRATE,
+ CrateNum, DefId, DefIndex, LocalDefId, LocalDefIdSet, CRATE_DEF_ID, CRATE_DEF_INDEX,
+ LOCAL_CRATE,
};
use rustc_hir::definitions::DefPathData;
-use rustc_hir::intravisit;
use rustc_hir::lang_items::LangItem;
+use rustc_hir_pretty::id_to_string;
use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::{
@@ -30,7 +31,6 @@ use rustc_middle::query::Providers;
use rustc_middle::traits::specialization_graph;
use rustc_middle::ty::codec::TyEncoder;
use rustc_middle::ty::fast_reject::{self, SimplifiedType, TreatParams};
-use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::{self, AssocItemContainer, SymbolName, Ty, TyCtxt};
use rustc_middle::util::common::to_readable_str;
use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
@@ -50,7 +50,6 @@ pub(super) struct EncodeContext<'a, 'tcx> {
opaque: opaque::FileEncoder,
tcx: TyCtxt<'tcx>,
feat: &'tcx rustc_feature::Features,
-
tables: TableBuilders,
lazy_state: LazyState,
@@ -131,7 +130,8 @@ impl<'a, 'tcx, T> Encodable<EncodeContext<'a, 'tcx>> for LazyArray<T> {
impl<'a, 'tcx, I, T> Encodable<EncodeContext<'a, 'tcx>> for LazyTable<I, T> {
fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
- e.emit_usize(self.encoded_size);
+ e.emit_usize(self.width);
+ e.emit_usize(self.len);
e.emit_lazy_distance(self.position);
}
}
@@ -279,8 +279,8 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData {
// All of this logic ensures that the final result of deserialization is a 'normal'
// Span that can be used without any additional trouble.
let metadata_index = {
- // Introduce a new scope so that we drop the 'lock()' temporary
- match &*source_file.external_src.lock() {
+ // Introduce a new scope so that we drop the 'read()' temporary
+ match &*source_file.external_src.read() {
ExternalSource::Foreign { metadata_index, .. } => *metadata_index,
src => panic!("Unexpected external source {src:?}"),
}
@@ -347,6 +347,13 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Symbol {
}
}
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for [u8] {
+ fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
+ Encoder::emit_usize(e, self.len());
+ e.emit_raw_bytes(self);
+ }
+}
+
impl<'a, 'tcx> TyEncoder for EncodeContext<'a, 'tcx> {
const CLEAR_CROSS_CRATE: bool = true;
@@ -819,7 +826,7 @@ fn should_encode_span(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -854,7 +861,7 @@ fn should_encode_attrs(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -895,7 +902,7 @@ fn should_encode_expn_that_defined(def_kind: DefKind) -> bool {
| DefKind::Variant
| DefKind::Trait
| DefKind::Impl { .. } => true,
- DefKind::TyAlias { .. }
+ DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -930,7 +937,7 @@ fn should_encode_visibility(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -974,7 +981,7 @@ fn should_encode_stability(def_kind: DefKind) -> bool {
| DefKind::Const
| DefKind::Fn
| DefKind::ForeignMod
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::OpaqueTy
| DefKind::Enum
| DefKind::Union
@@ -994,15 +1001,31 @@ fn should_encode_stability(def_kind: DefKind) -> bool {
}
}
-/// Whether we should encode MIR.
+/// Whether we should encode MIR. Return a pair, resp. for CTFE and for LLVM.
///
/// Computing, optimizing and encoding the MIR is a relatively expensive operation.
/// We want to avoid this work when not required. Therefore:
/// - we only compute `mir_for_ctfe` on items with const-eval semantics;
/// - we skip `optimized_mir` for check runs.
+/// - we only encode `optimized_mir` that could be generated in other crates, that is, code that
+/// is either generic or has an inline hint, and is reachable from other crates (contained
+/// in the reachable set).
///
-/// Return a pair, resp. for CTFE and for LLVM.
-fn should_encode_mir(tcx: TyCtxt<'_>, def_id: LocalDefId) -> (bool, bool) {
+/// Note: Reachable set describes definitions that might be generated or referenced from other
+/// crates and it can be used to limit optimized MIR that needs to be encoded. On the other hand,
+/// the reachable set doesn't have much to say about which definitions might be evaluated at compile
+/// time in other crates, so it cannot be used to omit CTFE MIR. For example, `f` below is
+/// unreachable and yet it can be evaluated in other crates:
+///
+/// ```
+/// const fn f() -> usize { 0 }
+/// pub struct S { pub a: [usize; f()] }
+/// ```
+fn should_encode_mir(
+ tcx: TyCtxt<'_>,
+ reachable_set: &LocalDefIdSet,
+ def_id: LocalDefId,
+) -> (bool, bool) {
match tcx.def_kind(def_id) {
// Constructors
DefKind::Ctor(_, _) => {
@@ -1019,14 +1042,15 @@ fn should_encode_mir(tcx: TyCtxt<'_>, def_id: LocalDefId) -> (bool, bool) {
// Full-fledged functions + closures
DefKind::AssocFn | DefKind::Fn | DefKind::Closure => {
let generics = tcx.generics_of(def_id);
- let needs_inline = (generics.requires_monomorphization(tcx)
- || tcx.codegen_fn_attrs(def_id).requests_inline())
- && tcx.sess.opts.output_types.should_codegen();
+ let opt = tcx.sess.opts.unstable_opts.always_encode_mir
+ || (tcx.sess.opts.output_types.should_codegen()
+ && reachable_set.contains(&def_id)
+ && (generics.requires_monomorphization(tcx)
+ || tcx.codegen_fn_attrs(def_id).requests_inline()));
// The function has a `const` modifier or is in a `#[const_trait]`.
let is_const_fn = tcx.is_const_fn_raw(def_id.to_def_id())
|| tcx.is_const_default_method(def_id.to_def_id());
- let always_encode_mir = tcx.sess.opts.unstable_opts.always_encode_mir;
- (is_const_fn, needs_inline || always_encode_mir)
+ (is_const_fn, opt)
}
// Generators require optimized MIR to compute layout.
DefKind::Generator => (false, true),
@@ -1067,9 +1091,7 @@ fn should_encode_variances<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, def_kind: Def
| DefKind::Closure
| DefKind::Generator
| DefKind::ExternCrate => false,
- DefKind::TyAlias { lazy } => {
- lazy || tcx.type_of(def_id).instantiate_identity().has_opaque_types()
- }
+ DefKind::TyAlias => tcx.type_alias_is_lazy(def_id),
}
}
@@ -1080,7 +1102,7 @@ fn should_encode_generics(def_kind: DefKind) -> bool {
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -1120,7 +1142,7 @@ fn should_encode_type(tcx: TyCtxt<'_>, def_id: LocalDefId, def_kind: DefKind) ->
| DefKind::Fn
| DefKind::Const
| DefKind::Static(..)
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::Impl { .. }
| DefKind::AssocFn
@@ -1180,7 +1202,7 @@ fn should_encode_fn_sig(def_kind: DefKind) -> bool {
| DefKind::Const
| DefKind::Static(..)
| DefKind::Ctor(..)
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::OpaqueTy
| DefKind::ForeignTy
| DefKind::Impl { .. }
@@ -1221,7 +1243,7 @@ fn should_encode_constness(def_kind: DefKind) -> bool {
| DefKind::AssocConst
| DefKind::AnonConst
| DefKind::Static(..)
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::OpaqueTy
| DefKind::Impl { of_trait: false }
| DefKind::ForeignTy
@@ -1254,7 +1276,7 @@ fn should_encode_const(def_kind: DefKind) -> bool {
| DefKind::Field
| DefKind::Fn
| DefKind::Static(..)
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::OpaqueTy
| DefKind::ForeignTy
| DefKind::Impl { .. }
@@ -1414,7 +1436,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
}
if let DefKind::Generator = def_kind {
- self.encode_info_for_generator(local_id);
+ let data = self.tcx.generator_kind(def_id).unwrap();
+ record!(self.tables.generator_kind[def_id] <- data);
}
if let DefKind::Enum | DefKind::Struct | DefKind::Union = def_kind {
self.encode_info_for_adt(local_id);
@@ -1425,6 +1448,11 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
if let DefKind::Macro(_) = def_kind {
self.encode_info_for_macro(local_id);
}
+ if let DefKind::TyAlias = def_kind {
+ self.tables
+ .type_alias_is_lazy
+ .set(def_id.index, self.tcx.type_alias_is_lazy(def_id));
+ }
if let DefKind::OpaqueTy = def_kind {
self.encode_explicit_item_bounds(def_id);
self.tables
@@ -1572,9 +1600,10 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
let tcx = self.tcx;
+ let reachable_set = tcx.reachable_set(());
let keys_and_jobs = tcx.mir_keys(()).iter().filter_map(|&def_id| {
- let (encode_const, encode_opt) = should_encode_mir(tcx, def_id);
+ let (encode_const, encode_opt) = should_encode_mir(tcx, reachable_set, def_id);
if encode_const || encode_opt { Some((def_id, encode_const, encode_opt)) } else { None }
});
for (def_id, encode_const, encode_opt) in keys_and_jobs {
@@ -1586,8 +1615,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record!(self.tables.closure_saved_names_of_captured_variables[def_id.to_def_id()]
<- tcx.closure_saved_names_of_captured_variables(def_id));
- if tcx.sess.opts.unstable_opts.drop_tracking_mir
- && let DefKind::Generator = self.tcx.def_kind(def_id)
+ if let DefKind::Generator = self.tcx.def_kind(def_id)
&& let Some(witnesses) = tcx.mir_generator_witnesses(def_id)
{
record!(self.tables.mir_generator_witnesses[def_id.to_def_id()] <- witnesses);
@@ -1607,13 +1635,19 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record!(self.tables.mir_const_qualif[def_id.to_def_id()] <- qualifs);
let body_id = tcx.hir().maybe_body_owned_by(def_id);
if let Some(body_id) = body_id {
- let const_data = self.encode_rendered_const_for_body(body_id);
+ let const_data = rendered_const(self.tcx, body_id);
record!(self.tables.rendered_const[def_id.to_def_id()] <- const_data);
}
}
}
record!(self.tables.promoted_mir[def_id.to_def_id()] <- tcx.promoted_mir(def_id));
+ if let DefKind::Generator = self.tcx.def_kind(def_id)
+ && let Some(witnesses) = tcx.mir_generator_witnesses(def_id)
+ {
+ record!(self.tables.mir_generator_witnesses[def_id.to_def_id()] <- witnesses);
+ }
+
let instance = ty::InstanceDef::Item(def_id.to_def_id());
let unused = tcx.unused_generic_params(instance);
self.tables.unused_generic_params.set(def_id.local_def_index, unused);
@@ -1675,14 +1709,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
}
}
- fn encode_rendered_const_for_body(&mut self, body_id: hir::BodyId) -> String {
- let hir = self.tcx.hir();
- let body = hir.body(body_id);
- rustc_hir_pretty::to_string(&(&hir as &dyn intravisit::Map<'_>), |s| {
- s.print_expr(&body.value)
- })
- }
-
#[instrument(level = "debug", skip(self))]
fn encode_info_for_macro(&mut self, def_id: LocalDefId) {
let tcx = self.tcx;
@@ -1694,15 +1720,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
record!(self.tables.macro_definition[def_id.to_def_id()] <- &*macro_def.body);
}
- #[instrument(level = "debug", skip(self))]
- fn encode_info_for_generator(&mut self, def_id: LocalDefId) {
- let typeck_result: &'tcx ty::TypeckResults<'tcx> = self.tcx.typeck(def_id);
- let data = self.tcx.generator_kind(def_id).unwrap();
- let generator_diagnostic_data = typeck_result.get_generator_diagnostic_data();
- record!(self.tables.generator_kind[def_id.to_def_id()] <- data);
- record!(self.tables.generator_diagnostic_data[def_id.to_def_id()] <- generator_diagnostic_data);
- }
-
fn encode_native_libraries(&mut self) -> LazyArray<NativeLib> {
empty_proc_macro!(self);
let used_libraries = self.tcx.native_libraries(LOCAL_CRATE);
@@ -2067,8 +2084,9 @@ fn prefetch_mir(tcx: TyCtxt<'_>) {
return;
}
+ let reachable_set = tcx.reachable_set(());
par_for_each_in(tcx.mir_keys(()), |&def_id| {
- let (encode_const, encode_opt) = should_encode_mir(tcx, def_id);
+ let (encode_const, encode_opt) = should_encode_mir(tcx, reachable_set, def_id);
if encode_const {
tcx.ensure_with_value().mir_for_ctfe(def_id);
@@ -2284,3 +2302,97 @@ pub fn provide(providers: &mut Providers) {
..*providers
}
}
+
+/// Build a textual representation of an unevaluated constant expression.
+///
+/// If the const expression is too complex, an underscore `_` is returned.
+/// For const arguments, it's `{ _ }` to be precise.
+/// This means that the output is not necessarily valid Rust code.
+///
+/// Currently, only
+///
+/// * literals (optionally with a leading `-`)
+/// * unit `()`
+/// * blocks (`{ … }`) around simple expressions and
+/// * paths without arguments
+///
+/// are considered simple enough. Simple blocks are included since they are
+/// necessary to disambiguate unit from the unit type.
+/// This list might get extended in the future.
+///
+/// Without this censoring, in a lot of cases the output would get too large
+/// and verbose. Consider `match` expressions, blocks and deeply nested ADTs.
+/// Further, private and `doc(hidden)` fields of structs would get leaked
+/// since HIR datatypes like the `body` parameter do not contain enough
+/// semantic information for this function to be able to hide them –
+/// at least not without significant performance overhead.
+///
+/// Whenever possible, prefer to evaluate the constant first and try to
+/// use a different method for pretty-printing. Ideally this function
+/// should only ever be used as a fallback.
+pub fn rendered_const<'tcx>(tcx: TyCtxt<'tcx>, body: hir::BodyId) -> String {
+ let hir = tcx.hir();
+ let value = &hir.body(body).value;
+
+ #[derive(PartialEq, Eq)]
+ enum Classification {
+ Literal,
+ Simple,
+ Complex,
+ }
+
+ use Classification::*;
+
+ fn classify(expr: &hir::Expr<'_>) -> Classification {
+ match &expr.kind {
+ hir::ExprKind::Unary(hir::UnOp::Neg, expr) => {
+ if matches!(expr.kind, hir::ExprKind::Lit(_)) { Literal } else { Complex }
+ }
+ hir::ExprKind::Lit(_) => Literal,
+ hir::ExprKind::Tup([]) => Simple,
+ hir::ExprKind::Block(hir::Block { stmts: [], expr: Some(expr), .. }, _) => {
+ if classify(expr) == Complex { Complex } else { Simple }
+ }
+ // Paths with a self-type or arguments are too “complex” by our measure, since
+ // they may leak private fields of structs (with feature `adt_const_params`).
+ // Consider: `<Self as Trait<{ Struct { private: () } }>>::CONSTANT`.
+ // Paths without arguments are definitely harmless though.
+ hir::ExprKind::Path(hir::QPath::Resolved(_, hir::Path { segments, .. })) => {
+ if segments.iter().all(|segment| segment.args.is_none()) { Simple } else { Complex }
+ }
+ // FIXME: Claiming that those kinds of QPaths are simple is probably not true if the Ty
+ // contains const arguments. Is there a *concise* way to check for this?
+ hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => Simple,
+ // FIXME: Can they contain const arguments and thus leak private struct fields?
+ hir::ExprKind::Path(hir::QPath::LangItem(..)) => Simple,
+ _ => Complex,
+ }
+ }
+
+ let classification = classify(value);
+
+ if classification == Literal
+ && !value.span.from_expansion()
+ && let Ok(snippet) = tcx.sess.source_map().span_to_snippet(value.span) {
+ // For literals, we avoid invoking the pretty-printer and use the source snippet instead to
+ // preserve certain stylistic choices the user likely made for the sake of legibility, like
+ //
+ // * hexadecimal notation
+ // * underscores
+ // * character escapes
+ //
+ // FIXME: This passes through `-/*spacer*/0` verbatim.
+ snippet
+ } else if classification == Simple {
+ // Otherwise we prefer pretty-printing to get rid of extraneous whitespace, comments and
+ // other formatting artifacts.
+ id_to_string(&hir, body.hir_id)
+ } else if tcx.def_kind(hir.body_owner_def_id(body).to_def_id()) == DefKind::AnonConst {
+ // FIXME: Omit the curly braces if the enclosing expression is an array literal
+ // with a repeated element (an `ExprKind::Repeat`) as in such case it
+ // would not actually need any disambiguation.
+ "{ _ }".to_owned()
+ } else {
+ "_".to_owned()
+ }
+}
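
For illustration only (not part of the patch): the conservative classification policy documented above can be modelled with a small standalone sketch. `ToyExpr`, `classify` and `render` below are made-up names; the real function walks `rustc_hir` expressions, reads the source snippet for literals, and falls back to `id_to_string` for the Simple case.

// Toy model of the censoring policy in `rendered_const` (illustrative only).
#[derive(PartialEq)]
enum Class { Literal, Simple, Complex }

#[allow(dead_code)]
enum ToyExpr {
    Lit(&'static str),       // e.g. `0xFF_u32`; keeps the user's spelling
    Neg(Box<ToyExpr>),       // literal with a leading `-`
    Unit,                    // `()`
    Block(Box<ToyExpr>),     // `{ <expr> }` with no statements
    Path { has_args: bool }, // `LEN` vs. a path with const arguments
    Other,                   // matches, calls, nested ADTs, ...
}

fn classify(expr: &ToyExpr) -> Class {
    match expr {
        ToyExpr::Lit(_) => Class::Literal,
        ToyExpr::Neg(inner) => {
            if matches!(**inner, ToyExpr::Lit(_)) { Class::Literal } else { Class::Complex }
        }
        ToyExpr::Unit => Class::Simple,
        ToyExpr::Block(inner) => {
            if classify(inner) == Class::Complex { Class::Complex } else { Class::Simple }
        }
        ToyExpr::Path { has_args } => {
            if *has_args { Class::Complex } else { Class::Simple }
        }
        ToyExpr::Other => Class::Complex,
    }
}

// `source` stands in for the span snippet, `is_anon_const` for the
// `DefKind::AnonConst` check on the body owner.
fn render(source: &str, expr: &ToyExpr, is_anon_const: bool) -> String {
    match classify(expr) {
        Class::Literal => source.to_owned(),            // keep hex, underscores, escapes
        Class::Simple => "<pretty-printed>".to_owned(), // placeholder for `id_to_string`
        Class::Complex if is_anon_const => "{ _ }".to_owned(),
        Class::Complex => "_".to_owned(),
    }
}

fn main() {
    let lit = ToyExpr::Lit("0xFF_u32");
    assert_eq!(render("0xFF_u32", &lit, false), "0xFF_u32");
    assert_eq!(render("…", &ToyExpr::Other, true), "{ _ }");
    assert_eq!(render("…", &ToyExpr::Other, false), "_");
}
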
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
index a89e235ff..42764af52 100644
--- a/compiler/rustc_metadata/src/rmeta/mod.rs
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -21,10 +21,10 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault;
use rustc_middle::mir;
-use rustc_middle::query::Providers;
use rustc_middle::ty::fast_reject::SimplifiedType;
use rustc_middle::ty::{self, ReprOptions, Ty, UnusedGenericParams};
-use rustc_middle::ty::{DeducedParamAttrs, GeneratorDiagnosticData, ParameterizedOverTcx, TyCtxt};
+use rustc_middle::ty::{DeducedParamAttrs, ParameterizedOverTcx, TyCtxt};
+use rustc_middle::util::Providers;
use rustc_serialize::opaque::FileEncoder;
use rustc_session::config::SymbolManglingVersion;
use rustc_session::cstore::{CrateDepKind, ForeignModule, LinkagePreference, NativeLib};
@@ -38,11 +38,10 @@ use rustc_target::spec::{PanicStrategy, TargetTriple};
use std::marker::PhantomData;
use std::num::NonZeroUsize;
-pub use decoder::provide_extern;
use decoder::DecodeContext;
pub(crate) use decoder::{CrateMetadata, CrateNumMap, MetadataBlob};
use encoder::EncodeContext;
-pub use encoder::{encode_metadata, EncodedMetadata};
+pub use encoder::{encode_metadata, rendered_const, EncodedMetadata};
use rustc_span::hygiene::SyntaxContextData;
mod decoder;
@@ -142,7 +141,11 @@ impl<T> LazyArray<T> {
/// eagerly and in-order.
struct LazyTable<I, T> {
position: NonZeroUsize,
- encoded_size: usize,
+ /// The encoded size of the elements of a table is selected at runtime to drop
+ /// trailing zeroes. This is the number of bytes used for each table element.
+ width: usize,
+ /// How many elements are in the table.
+ len: usize,
_marker: PhantomData<fn(I) -> T>,
}
@@ -153,9 +156,10 @@ impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for LazyTable<I,
impl<I, T> LazyTable<I, T> {
fn from_position_and_encoded_size(
position: NonZeroUsize,
- encoded_size: usize,
+ width: usize,
+ len: usize,
) -> LazyTable<I, T> {
- LazyTable { position, encoded_size, _marker: PhantomData }
+ LazyTable { position, width, len, _marker: PhantomData }
}
}
@@ -379,6 +383,7 @@ define_tables! {
is_intrinsic: Table<DefIndex, bool>,
is_macro_rules: Table<DefIndex, bool>,
is_type_alias_impl_trait: Table<DefIndex, bool>,
+ type_alias_is_lazy: Table<DefIndex, bool>,
attr_flags: Table<DefIndex, AttrFlags>,
def_path_hashes: Table<DefIndex, DefPathHash>,
explicit_item_bounds: Table<DefIndex, LazyArray<(ty::Clause<'static>, Span)>>,
@@ -434,7 +439,7 @@ define_tables! {
coerce_unsized_info: Table<DefIndex, LazyValue<ty::adjustment::CoerceUnsizedInfo>>,
mir_const_qualif: Table<DefIndex, LazyValue<mir::ConstQualifs>>,
rendered_const: Table<DefIndex, LazyValue<String>>,
- asyncness: Table<DefIndex, hir::IsAsync>,
+ asyncness: Table<DefIndex, ty::Asyncness>,
fn_arg_names: Table<DefIndex, LazyArray<Ident>>,
generator_kind: Table<DefIndex, LazyValue<hir::GeneratorKind>>,
trait_def: Table<DefIndex, LazyValue<ty::TraitDef>>,
@@ -448,7 +453,6 @@ define_tables! {
// definitions from any given crate.
def_keys: Table<DefIndex, LazyValue<DefKey>>,
proc_macro_quoted_spans: Table<usize, LazyValue<Span>>,
- generator_diagnostic_data: Table<DefIndex, LazyValue<GeneratorDiagnosticData<'static>>>,
variant_data: Table<DefIndex, LazyValue<VariantData>>,
assoc_container: Table<DefIndex, ty::AssocItemContainer>,
macro_definition: Table<DefIndex, LazyValue<ast::DelimArgs>>,
diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs
index ea66c770b..bb1320942 100644
--- a/compiler/rustc_metadata/src/rmeta/table.rs
+++ b/compiler/rustc_metadata/src/rmeta/table.rs
@@ -5,7 +5,6 @@ use rustc_hir::def::{CtorKind, CtorOf};
use rustc_index::Idx;
use rustc_middle::ty::{ParameterizedOverTcx, UnusedGenericParams};
use rustc_serialize::opaque::FileEncoder;
-use rustc_serialize::Encoder as _;
use rustc_span::hygiene::MacroKind;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
@@ -38,6 +37,12 @@ impl IsDefault for u32 {
}
}
+impl IsDefault for u64 {
+ fn is_default(&self) -> bool {
+ *self == 0
+ }
+}
+
impl<T> IsDefault for LazyArray<T> {
fn is_default(&self) -> bool {
self.num_elems == 0
@@ -89,6 +94,20 @@ impl FixedSizeEncoding for u32 {
}
}
+impl FixedSizeEncoding for u64 {
+ type ByteArray = [u8; 8];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 8]) -> Self {
+ Self::from_le_bytes(*b)
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 8]) {
+ *b = self.to_le_bytes();
+ }
+}
+
macro_rules! fixed_size_enum {
($ty:ty { $(($($pat:tt)*))* }) => {
impl FixedSizeEncoding for Option<$ty> {
@@ -126,8 +145,7 @@ fixed_size_enum! {
( Enum )
( Variant )
( Trait )
- ( TyAlias { lazy: false } )
- ( TyAlias { lazy: true } )
+ ( TyAlias )
( ForeignTy )
( TraitAlias )
( AssocTy )
@@ -186,9 +204,9 @@ fixed_size_enum! {
}
fixed_size_enum! {
- hir::IsAsync {
- ( NotAsync )
- ( Async )
+ ty::Asyncness {
+ ( Yes )
+ ( No )
}
}
@@ -300,21 +318,21 @@ impl FixedSizeEncoding for UnusedGenericParams {
// generic `LazyValue<T>` impl, but in the general case we might not need / want
// to fit every `usize` in `u32`.
impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
- type ByteArray = [u8; 4];
+ type ByteArray = [u8; 8];
#[inline]
- fn from_bytes(b: &[u8; 4]) -> Self {
- let position = NonZeroUsize::new(u32::from_bytes(b) as usize)?;
+ fn from_bytes(b: &[u8; 8]) -> Self {
+ let position = NonZeroUsize::new(u64::from_bytes(b) as usize)?;
Some(LazyValue::from_position(position))
}
#[inline]
- fn write_to_bytes(self, b: &mut [u8; 4]) {
+ fn write_to_bytes(self, b: &mut [u8; 8]) {
match self {
None => unreachable!(),
Some(lazy) => {
let position = lazy.position.get();
- let position: u32 = position.try_into().unwrap();
+ let position: u64 = position.try_into().unwrap();
position.write_to_bytes(b)
}
}
@@ -323,55 +341,75 @@ impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
impl<T> LazyArray<T> {
#[inline]
- fn write_to_bytes_impl(self, b: &mut [u8; 8]) {
- let ([position_bytes, meta_bytes], []) = b.as_chunks_mut::<4>() else { panic!() };
-
- let position = self.position.get();
- let position: u32 = position.try_into().unwrap();
- position.write_to_bytes(position_bytes);
-
- let len = self.num_elems;
- let len: u32 = len.try_into().unwrap();
- len.write_to_bytes(meta_bytes);
+ fn write_to_bytes_impl(self, b: &mut [u8; 16]) {
+ let position = (self.position.get() as u64).to_le_bytes();
+ let len = (self.num_elems as u64).to_le_bytes();
+
+ // Element width is selected at runtime on a per-table basis by omitting trailing
+ // zero bytes in table elements. This works very naturally when table elements are
+ // simple numbers but `LazyArray` is a pair of integers. If naively encoded, the second
+ // element would shield the trailing zeroes in the first. Interleaving the bytes
+ // of the position and length exposes trailing zeroes in both to the optimization.
+ // We encode length second because we generally expect it to be smaller.
+ for i in 0..8 {
+ b[2 * i] = position[i];
+ b[2 * i + 1] = len[i];
+ }
}
- fn from_bytes_impl(position_bytes: &[u8; 4], meta_bytes: &[u8; 4]) -> Option<LazyArray<T>> {
- let position = NonZeroUsize::new(u32::from_bytes(position_bytes) as usize)?;
- let len = u32::from_bytes(meta_bytes) as usize;
+ fn from_bytes_impl(position: &[u8; 8], meta: &[u8; 8]) -> Option<LazyArray<T>> {
+ let position = NonZeroUsize::new(u64::from_bytes(&position) as usize)?;
+ let len = u64::from_bytes(&meta) as usize;
Some(LazyArray::from_position_and_num_elems(position, len))
}
}
+// Decoding helper for the encoding scheme used by `LazyArray`.
+// Interleaving the bytes of the two integers exposes the trailing zero bytes of both
+// to the varint scheme that we use for tables.
+#[inline]
+fn decode_interleaved(encoded: &[u8; 16]) -> ([u8; 8], [u8; 8]) {
+ let mut first = [0u8; 8];
+ let mut second = [0u8; 8];
+ for i in 0..8 {
+ first[i] = encoded[2 * i];
+ second[i] = encoded[2 * i + 1];
+ }
+ (first, second)
+}
+
impl<T> FixedSizeEncoding for LazyArray<T> {
- type ByteArray = [u8; 8];
+ type ByteArray = [u8; 16];
#[inline]
- fn from_bytes(b: &[u8; 8]) -> Self {
- let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() };
- if *meta_bytes == [0; 4] {
+ fn from_bytes(b: &[u8; 16]) -> Self {
+ let (position, meta) = decode_interleaved(b);
+
+ if meta == [0; 8] {
return Default::default();
}
- LazyArray::from_bytes_impl(position_bytes, meta_bytes).unwrap()
+ LazyArray::from_bytes_impl(&position, &meta).unwrap()
}
#[inline]
- fn write_to_bytes(self, b: &mut [u8; 8]) {
+ fn write_to_bytes(self, b: &mut [u8; 16]) {
assert!(!self.is_default());
self.write_to_bytes_impl(b)
}
}
impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
- type ByteArray = [u8; 8];
+ type ByteArray = [u8; 16];
#[inline]
- fn from_bytes(b: &[u8; 8]) -> Self {
- let ([position_bytes, meta_bytes], []) = b.as_chunks::<4>() else { panic!() };
- LazyArray::from_bytes_impl(position_bytes, meta_bytes)
+ fn from_bytes(b: &[u8; 16]) -> Self {
+ let (position, meta) = decode_interleaved(b);
+
+ LazyArray::from_bytes_impl(&position, &meta)
}
#[inline]
- fn write_to_bytes(self, b: &mut [u8; 8]) {
+ fn write_to_bytes(self, b: &mut [u8; 16]) {
match self {
None => unreachable!(),
Some(lazy) => lazy.write_to_bytes_impl(b),
@@ -381,13 +419,14 @@ impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T: FixedSizeEncoding> {
+ width: usize,
blocks: IndexVec<I, T::ByteArray>,
_marker: PhantomData<T>,
}
impl<I: Idx, T: FixedSizeEncoding> Default for TableBuilder<I, T> {
fn default() -> Self {
- TableBuilder { blocks: Default::default(), _marker: PhantomData }
+ TableBuilder { width: 0, blocks: Default::default(), _marker: PhantomData }
}
}
@@ -415,40 +454,66 @@ impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]>> TableBui
// > store bit-masks of which item in each bucket is actually serialized).
let block = self.blocks.ensure_contains_elem(i, || [0; N]);
value.write_to_bytes(block);
+ if self.width != N {
+ let width = N - trailing_zeros(block);
+ self.width = self.width.max(width);
+ }
}
}
pub(crate) fn encode(&self, buf: &mut FileEncoder) -> LazyTable<I, T> {
let pos = buf.position();
+
+ let width = self.width;
for block in &self.blocks {
- buf.emit_raw_bytes(block);
+ buf.write_with(|dest| {
+ *dest = *block;
+ width
+ });
}
- let num_bytes = self.blocks.len() * N;
+
LazyTable::from_position_and_encoded_size(
NonZeroUsize::new(pos as usize).unwrap(),
- num_bytes,
+ width,
+ self.blocks.len(),
)
}
}
+fn trailing_zeros(x: &[u8]) -> usize {
+ x.iter().rev().take_while(|b| **b == 0).count()
+}
+
impl<I: Idx, const N: usize, T: FixedSizeEncoding<ByteArray = [u8; N]> + ParameterizedOverTcx>
LazyTable<I, T>
where
for<'tcx> T::Value<'tcx>: FixedSizeEncoding<ByteArray = [u8; N]>,
{
/// Given the metadata, extract out the value at a particular index (if any).
- #[inline(never)]
pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(&self, metadata: M, i: I) -> T::Value<'tcx> {
- trace!("LazyTable::lookup: index={:?} len={:?}", i, self.encoded_size);
+ trace!("LazyTable::lookup: index={:?} len={:?}", i, self.len);
+
+ // An access past the end of the table returns the default value.
+ if i.index() >= self.len {
+ return Default::default();
+ }
- let start = self.position.get();
- let bytes = &metadata.blob()[start..start + self.encoded_size];
- let (bytes, []) = bytes.as_chunks::<N>() else { panic!() };
- bytes.get(i.index()).map_or_else(Default::default, FixedSizeEncoding::from_bytes)
+ let width = self.width;
+ let start = self.position.get() + (width * i.index());
+ let end = start + width;
+ let bytes = &metadata.blob()[start..end];
+
+ if let Ok(fixed) = bytes.try_into() {
+ FixedSizeEncoding::from_bytes(fixed)
+ } else {
+ let mut fixed = [0u8; N];
+ fixed[..width].copy_from_slice(bytes);
+ FixedSizeEncoding::from_bytes(&fixed)
+ }
}
/// Size of the table in entries, including possible gaps.
pub(super) fn size(&self) -> usize {
- self.encoded_size / N
+ self.len
}
}
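
To make the new table encoding concrete (an illustrative sketch, not part of the patch): the two tricks described in the comments above can be demonstrated in isolation. The helper names below are made up; the assertions only show that interleaving `(position, len)` pushes the zero bytes of both integers to the tail of the 16-byte block, so storing just the per-table `width` prefix and zero-extending on decode loses nothing.

// Standalone demonstration of interleaving plus width trimming (illustrative only).
fn interleave(position: u64, len: u64) -> [u8; 16] {
    let (p, l) = (position.to_le_bytes(), len.to_le_bytes());
    let mut out = [0u8; 16];
    for i in 0..8 {
        out[2 * i] = p[i];
        out[2 * i + 1] = l[i];
    }
    out
}

fn deinterleave(b: &[u8; 16]) -> (u64, u64) {
    let (mut p, mut l) = ([0u8; 8], [0u8; 8]);
    for i in 0..8 {
        p[i] = b[2 * i];
        l[i] = b[2 * i + 1];
    }
    (u64::from_le_bytes(p), u64::from_le_bytes(l))
}

fn trailing_zeros(x: &[u8]) -> usize {
    x.iter().rev().take_while(|b| **b == 0).count()
}

fn main() {
    // Two "table elements"; small values leave most of the 16 bytes zero.
    let blocks = [interleave(0x1234, 3), interleave(0x2F00, 40)];

    // Width selection: longest non-zero prefix over all elements (here 3 bytes).
    let width = blocks.iter().map(|b| 16 - trailing_zeros(b)).max().unwrap();
    assert_eq!(width, 3);

    // "Encode": store only `width` bytes per element.
    let encoded: Vec<u8> = blocks.iter().flat_map(|b| b[..width].to_vec()).collect();
    assert_eq!(encoded.len(), 2 * width);

    // "Decode" element 1: zero-extend back to the full block, as `LazyTable::get`
    // does when `width < N`.
    let index = 1;
    let start = index * width;
    let mut fixed = [0u8; 16];
    fixed[..width].copy_from_slice(&encoded[start..start + width]);
    assert_eq!(deinterleave(&fixed), (0x2F00, 40));
}
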
diff --git a/compiler/rustc_middle/messages.ftl b/compiler/rustc_middle/messages.ftl
index 108a10b50..82162fd85 100644
--- a/compiler/rustc_middle/messages.ftl
+++ b/compiler/rustc_middle/messages.ftl
@@ -52,6 +52,8 @@ middle_drop_check_overflow =
overflow while adding drop-check rules for {$ty}
.note = overflowed on {$overflow_ty}
+middle_erroneous_constant = erroneous constant encountered
+
middle_layout_references_error =
the type has an unknown layout
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 04c09d334..39d82c489 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -65,9 +65,9 @@ use rustc_hir::definitions::DefPathHash;
use rustc_hir::{HirId, ItemLocalId, OwnerId};
use rustc_query_system::dep_graph::FingerprintStyle;
use rustc_span::symbol::Symbol;
-use std::hash::Hash;
-pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
+pub use rustc_query_system::dep_graph::dep_node::DepKind;
+pub use rustc_query_system::dep_graph::{DepContext, DepNode, DepNodeParams};
macro_rules! define_dep_nodes {
(
@@ -80,15 +80,43 @@ macro_rules! define_dep_nodes {
}
/// This enum serves as an index into arrays built by `make_dep_kind_array`.
- #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+ // This enum has more than u8::MAX variants so we need some kind of multi-byte
+ // encoding. The derived Encodable/Decodable uses leb128 encoding which is
+ // dense when only considering this enum. But DepKind is encoded in a larger
+ // struct, and there we can take advantage of the unused bits in the u16.
#[allow(non_camel_case_types)]
- pub enum DepKind {
+ #[repr(u16)] // Must be kept in sync with the inner type of `DepKind`.
+ enum DepKindDefs {
$( $( #[$attr] )* $variant),*
}
+ #[allow(non_upper_case_globals)]
+ pub mod dep_kinds {
+ use super::*;
+
+ $(
+ // The `as u16` cast must be kept in sync with the inner type of `DepKind`.
+ pub const $variant: DepKind = DepKind::new(DepKindDefs::$variant as u16);
+ )*
+ }
+
+ // This checks that the discriminants of the variants have been assigned consecutively
+ // from 0 so that they can be used as a dense index.
+ pub const DEP_KIND_VARIANTS: u16 = {
+ let deps = &[$(dep_kinds::$variant,)*];
+ let mut i = 0;
+ while i < deps.len() {
+ if i != deps[i].as_usize() {
+ panic!();
+ }
+ i += 1;
+ }
+ deps.len() as u16
+ };
+
pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
match label {
- $(stringify!($variant) => Ok(DepKind::$variant),)*
+ $(stringify!($variant) => Ok(dep_kinds::$variant),)*
_ => Err(()),
}
}
@@ -117,7 +145,7 @@ rustc_query_append!(define_dep_nodes![
// WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys.
// Be very careful changing this type signature!
pub(crate) fn make_compile_codegen_unit(tcx: TyCtxt<'_>, name: Symbol) -> DepNode {
- DepNode::construct(tcx, DepKind::CompileCodegenUnit, &name)
+ DepNode::construct(tcx, dep_kinds::CompileCodegenUnit, &name)
}
// WARNING: `construct` is generic and does not know that `CompileMonoItem` takes `MonoItem`s as keys.
@@ -126,20 +154,9 @@ pub(crate) fn make_compile_mono_item<'tcx>(
tcx: TyCtxt<'tcx>,
mono_item: &MonoItem<'tcx>,
) -> DepNode {
- DepNode::construct(tcx, DepKind::CompileMonoItem, mono_item)
+ DepNode::construct(tcx, dep_kinds::CompileMonoItem, mono_item)
}
-pub type DepNode = rustc_query_system::dep_graph::DepNode<DepKind>;
-
-// We keep a lot of `DepNode`s in memory during compilation. It's not
-// required that their size stay the same, but we don't want to change
-// it inadvertently. This assert just ensures we're aware of any change.
-#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
-static_assert_size!(DepNode, 18);
-
-#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
-static_assert_size!(DepNode, 24);
-
pub trait DepNodeExt: Sized {
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index f79ce08b8..76ef62f9f 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -6,48 +6,24 @@ use rustc_session::Session;
#[macro_use]
mod dep_node;
+pub use rustc_query_system::dep_graph::debug::EdgeFilter;
pub use rustc_query_system::dep_graph::{
- debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
- SerializedDepNodeIndex, WorkProduct, WorkProductId, WorkProductMap,
+ debug::DepNodeFilter, hash_result, DepContext, DepGraphQuery, DepNodeColor, DepNodeIndex, Deps,
+ SerializedDepGraph, SerializedDepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct, WorkProductId,
+ WorkProductMap,
};
-pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
+pub use dep_node::{dep_kinds, label_strs, DepKind, DepNode, DepNodeExt};
pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
-pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepsType>;
-pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
-pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
-pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
-pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
-pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>;
-impl rustc_query_system::dep_graph::DepKind for DepKind {
- const NULL: Self = DepKind::Null;
- const RED: Self = DepKind::Red;
-
- fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "{:?}(", node.kind)?;
-
- ty::tls::with_opt(|opt_tcx| {
- if let Some(tcx) = opt_tcx {
- if let Some(def_id) = node.extract_def_id(tcx) {
- write!(f, "{}", tcx.def_path_debug_str(def_id))?;
- } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
- write!(f, "{s}")?;
- } else {
- write!(f, "{}", node.hash)?;
- }
- } else {
- write!(f, "{}", node.hash)?;
- }
- Ok(())
- })?;
-
- write!(f, ")")
- }
+#[derive(Clone)]
+pub struct DepsType;
+impl Deps for DepsType {
fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R
where
OP: FnOnce() -> R,
@@ -68,10 +44,14 @@ impl rustc_query_system::dep_graph::DepKind for DepKind {
op(icx.task_deps)
})
}
+
+ const DEP_KIND_NULL: DepKind = dep_kinds::Null;
+ const DEP_KIND_RED: DepKind = dep_kinds::Red;
+ const DEP_KIND_MAX: u16 = dep_node::DEP_KIND_VARIANTS - 1;
}
impl<'tcx> DepContext for TyCtxt<'tcx> {
- type DepKind = DepKind;
+ type Deps = DepsType;
#[inline]
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
@@ -95,6 +75,6 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
#[inline]
fn dep_kind_info(&self, dk: DepKind) -> &DepKindStruct<'tcx> {
- &self.query_kinds[dk as usize]
+ &self.query_kinds[dk.as_usize()]
}
}
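
As an aside (illustrative only, not part of the patch): the dep-kind changes above follow a reusable pattern, namely a `u16`-backed newtype, a module of per-variant constants, and a compile-time check that the discriminants are dense so they can double as array indices. A minimal standalone sketch with made-up names:

// Sketch of the dense dep-kind constant pattern (illustrative only).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Kind(u16);

impl Kind {
    pub const fn new(k: u16) -> Self { Kind(k) }
    pub const fn as_usize(self) -> usize { self.0 as usize }
}

#[repr(u16)] // must be kept in sync with the inner type of `Kind`
enum KindDefs { Null, Red, TypeOf, MirBuilt }

#[allow(non_upper_case_globals)]
pub mod kinds {
    use super::*;
    pub const Null: Kind = Kind::new(KindDefs::Null as u16);
    pub const Red: Kind = Kind::new(KindDefs::Red as u16);
    pub const TypeOf: Kind = Kind::new(KindDefs::TypeOf as u16);
    pub const MirBuilt: Kind = Kind::new(KindDefs::MirBuilt as u16);
}

// Evaluated at compile time; a non-consecutive discriminant fails the build,
// just like the `DEP_KIND_VARIANTS` check above.
pub const KIND_VARIANTS: u16 = {
    let all = &[kinds::Null, kinds::Red, kinds::TypeOf, kinds::MirBuilt];
    let mut i = 0;
    while i < all.len() {
        if i != all[i].as_usize() { panic!(); }
        i += 1;
    }
    all.len() as u16
};

fn main() {
    // Dense indices let a per-kind array be indexed directly by the kind.
    let names = ["Null", "Red", "TypeOf", "MirBuilt"];
    assert_eq!(KIND_VARIANTS, 4);
    assert_eq!(names[kinds::TypeOf.as_usize()], "TypeOf");
}
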
diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs
index b346cd453..3c5536570 100644
--- a/compiler/rustc_middle/src/error.rs
+++ b/compiler/rustc_middle/src/error.rs
@@ -144,5 +144,12 @@ pub struct UnsupportedFnAbi {
pub abi: &'static str,
}
+#[derive(Diagnostic)]
+#[diag(middle_erroneous_constant)]
+pub struct ErroneousConstant {
+ #[primary_span]
+ pub span: Span,
+}
+
/// Used by `rustc_const_eval`
pub use crate::fluent_generated::middle_adjust_for_foreign_abi_error;
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index 467962b39..4af2d83e9 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -196,9 +196,7 @@ impl<'hir> Map<'hir> {
ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind),
ItemKind::Mod(..) => DefKind::Mod,
ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
- ItemKind::TyAlias(..) => {
- DefKind::TyAlias { lazy: self.tcx.features().lazy_type_alias }
- }
+ ItemKind::TyAlias(..) => DefKind::TyAlias,
ItemKind::Enum(..) => DefKind::Enum,
ItemKind::Struct(..) => DefKind::Struct,
ItemKind::Union(..) => DefKind::Union,
@@ -442,9 +440,10 @@ impl<'hir> Map<'hir> {
/// Panics if `LocalDefId` does not have an associated body.
pub fn body_owner_kind(self, def_id: LocalDefId) -> BodyOwnerKind {
match self.tcx.def_kind(def_id) {
- DefKind::Const | DefKind::AssocConst | DefKind::InlineConst | DefKind::AnonConst => {
- BodyOwnerKind::Const
+ DefKind::Const | DefKind::AssocConst | DefKind::AnonConst => {
+ BodyOwnerKind::Const { inline: false }
}
+ DefKind::InlineConst => BodyOwnerKind::Const { inline: true },
DefKind::Ctor(..) | DefKind::Fn | DefKind::AssocFn => BodyOwnerKind::Fn,
DefKind::Closure | DefKind::Generator => BodyOwnerKind::Closure,
DefKind::Static(mt) => BodyOwnerKind::Static(mt),
@@ -461,7 +460,7 @@ impl<'hir> Map<'hir> {
/// just that it has to be checked as if it were.
pub fn body_const_context(self, def_id: LocalDefId) -> Option<ConstContext> {
let ccx = match self.body_owner_kind(def_id) {
- BodyOwnerKind::Const => ConstContext::Const,
+ BodyOwnerKind::Const { inline } => ConstContext::Const { inline },
BodyOwnerKind::Static(mt) => ConstContext::Static(mt),
BodyOwnerKind::Fn if self.tcx.is_constructor(def_id.to_def_id()) => return None,
@@ -701,6 +700,8 @@ impl<'hir> Map<'hir> {
// expressions.
ignore_tail = true;
}
+
+ let mut prev_hir_id = None;
while let Some((hir_id, node)) = iter.next() {
if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) {
match next_node {
@@ -715,7 +716,14 @@ impl<'hir> Map<'hir> {
| Node::ForeignItem(_)
| Node::TraitItem(_)
| Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. })
- | Node::ImplItem(_) => return Some(hir_id),
+ | Node::ImplItem(_)
+ // The input node `id` must be enclosed in the method's body as opposed
+ // to some other place such as its return type (fixes #114918).
+ // We verify that indirectly by checking that the previous node is the
+ // current node's body
+ if node.body_id().map(|b| b.hir_id) == prev_hir_id => {
+ return Some(hir_id)
+ }
// Ignore `return`s on the first iteration
Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. })
| Node::Local(_) => {
@@ -723,6 +731,8 @@ impl<'hir> Map<'hir> {
}
_ => {}
}
+
+ prev_hir_id = Some(hir_id);
}
None
}
@@ -1195,8 +1205,8 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, _: LocalCrate) -> Svh {
upstream_crates.hash_stable(&mut hcx, &mut stable_hasher);
source_file_names.hash_stable(&mut hcx, &mut stable_hasher);
debugger_visualizers.hash_stable(&mut hcx, &mut stable_hasher);
- if tcx.sess.opts.incremental_relative_spans() {
- let definitions = tcx.definitions_untracked();
+ if tcx.sess.opts.incremental.is_some() {
+ let definitions = tcx.untracked().definitions.freeze();
let mut owner_spans: Vec<_> = krate
.owners
.iter_enumerated()
@@ -1215,7 +1225,6 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, _: LocalCrate) -> Svh {
tcx.stable_crate_id(LOCAL_CRATE).hash_stable(&mut hcx, &mut stable_hasher);
// Hash visibility information since it does not appear in HIR.
resolutions.visibilities.hash_stable(&mut hcx, &mut stable_hasher);
- resolutions.has_pub_restricted.hash_stable(&mut hcx, &mut stable_hasher);
stable_hasher.finish()
});
diff --git a/compiler/rustc_middle/src/hooks/mod.rs b/compiler/rustc_middle/src/hooks/mod.rs
new file mode 100644
index 000000000..12aeae177
--- /dev/null
+++ b/compiler/rustc_middle/src/hooks/mod.rs
@@ -0,0 +1,65 @@
+use crate::mir;
+use crate::query::TyCtxtAt;
+use crate::ty::{Ty, TyCtxt};
+use rustc_span::DUMMY_SP;
+
+macro_rules! declare_hooks {
+ ($($(#[$attr:meta])*hook $name:ident($($arg:ident: $K:ty),*) -> $V:ty;)*) => {
+
+ impl<'tcx> TyCtxt<'tcx> {
+ $(
+ $(#[$attr])*
+ #[inline(always)]
+ #[must_use]
+ pub fn $name(self, $($arg: $K,)*) -> $V
+ {
+ self.at(DUMMY_SP).$name($($arg,)*)
+ }
+ )*
+ }
+
+ impl<'tcx> TyCtxtAt<'tcx> {
+ $(
+ $(#[$attr])*
+ #[inline(always)]
+ #[must_use]
+ #[instrument(level = "debug", skip(self), ret)]
+ pub fn $name(self, $($arg: $K,)*) -> $V
+ {
+ (self.tcx.hooks.$name)(self, $($arg,)*)
+ }
+ )*
+ }
+
+ pub struct Providers {
+ $(pub $name: for<'tcx> fn(
+ TyCtxtAt<'tcx>,
+ $($arg: $K,)*
+ ) -> $V,)*
+ }
+
+ impl Default for Providers {
+ fn default() -> Self {
+ Providers {
+ $($name: |_, $($arg,)*| bug!(
+ "`tcx.{}{:?}` cannot be called as `{}` was never assigned to a provider function.\n",
+ stringify!($name),
+ ($($arg,)*),
+ stringify!($name),
+ ),)*
+ }
+ }
+ }
+
+ impl Copy for Providers {}
+ impl Clone for Providers {
+ fn clone(&self) -> Self { *self }
+ }
+ };
+}
+
+declare_hooks! {
+ /// Tries to destructure an `mir::Const` ADT or array into its variant index
+ /// and its field values. This should only be used for pretty printing.
+ hook try_destructure_mir_constant_for_diagnostics(val: mir::ConstValue<'tcx>, ty: Ty<'tcx>) -> Option<mir::DestructuredConstant<'tcx>>;
+}
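
For orientation (an illustrative sketch, not part of the patch): `declare_hooks!` above expands to a struct of function pointers whose defaults panic until a provider is assigned, plus forwarding methods on the context type. The toy version below shows that shape without the `TyCtxt`/`TyCtxtAt` plumbing; all names are made up.

// Minimal model of the hooks pattern (illustrative only).
#[derive(Copy, Clone)]
pub struct Hooks {
    pub try_destructure_for_diagnostics: fn(ctx: &Context, val: u64) -> Option<Vec<u64>>,
}

impl Default for Hooks {
    fn default() -> Self {
        Hooks {
            // Calling a hook that was never wired up is a bug, not a user error.
            try_destructure_for_diagnostics: |_, _| {
                panic!("`try_destructure_for_diagnostics` was never assigned a provider")
            },
        }
    }
}

pub struct Context {
    pub hooks: Hooks,
}

impl Context {
    pub fn try_destructure_for_diagnostics(&self, val: u64) -> Option<Vec<u64>> {
        (self.hooks.try_destructure_for_diagnostics)(self, val)
    }
}

fn main() {
    // A downstream "provider" crate fills in the hook before use.
    let mut hooks = Hooks::default();
    hooks.try_destructure_for_diagnostics = |_, val| Some(vec![val & 0xF, val >> 4]);

    let ctx = Context { hooks };
    assert_eq!(ctx.try_destructure_for_diagnostics(0x2A), Some(vec![0xA, 0x2]));
}

Keeping the hooks separate from the query `Providers` lets callers reach functionality implemented in downstream crates without pulling it into the query system.
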
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
index 81823118a..41beca072 100644
--- a/compiler/rustc_middle/src/infer/canonical.rs
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -27,19 +27,30 @@ use crate::ty::GenericArg;
use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt};
use rustc_macros::HashStable;
use smallvec::SmallVec;
+use std::fmt::Display;
use std::ops::Index;
/// A "canonicalized" type `V` is one where all free inference
/// variables have been rewritten to "canonical vars". These are
/// numbered starting from 0 in order of first appearance.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct Canonical<'tcx, V> {
pub value: V,
pub max_universe: ty::UniverseIndex,
pub variables: CanonicalVarInfos<'tcx>,
}
+impl<'tcx, V: Display> std::fmt::Display for Canonical<'tcx, V> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "Canonical {{ value: {}, max_universe: {:?}, variables: {:?} }}",
+ self.value, self.max_universe, self.variables
+ )
+ }
+}
+
pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>;
impl<'tcx> ty::TypeFoldable<TyCtxt<'tcx>> for CanonicalVarInfos<'tcx> {
@@ -61,7 +72,7 @@ impl<'tcx> ty::TypeFoldable<TyCtxt<'tcx>> for CanonicalVarInfos<'tcx> {
/// variables. You will need to supply it later to instantiate the
/// canonicalized query response.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct CanonicalVarValues<'tcx> {
pub var_values: ty::GenericArgsRef<'tcx>,
}
@@ -173,6 +184,7 @@ impl<'tcx> CanonicalVarInfo<'tcx> {
CanonicalVarKind::PlaceholderRegion(..) => false,
CanonicalVarKind::Const(..) => true,
CanonicalVarKind::PlaceholderConst(_, _) => false,
+ CanonicalVarKind::Effect => true,
}
}
@@ -182,7 +194,8 @@ impl<'tcx> CanonicalVarInfo<'tcx> {
CanonicalVarKind::Ty(_)
| CanonicalVarKind::PlaceholderTy(_)
| CanonicalVarKind::Const(_, _)
- | CanonicalVarKind::PlaceholderConst(_, _) => false,
+ | CanonicalVarKind::PlaceholderConst(_, _)
+ | CanonicalVarKind::Effect => false,
}
}
@@ -190,7 +203,8 @@ impl<'tcx> CanonicalVarInfo<'tcx> {
match self.kind {
CanonicalVarKind::Ty(_)
| CanonicalVarKind::Region(_)
- | CanonicalVarKind::Const(_, _) => bug!("expected placeholder: {self:?}"),
+ | CanonicalVarKind::Const(_, _)
+ | CanonicalVarKind::Effect => bug!("expected placeholder: {self:?}"),
CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.bound.var.as_usize(),
CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.bound.var.as_usize(),
@@ -222,6 +236,9 @@ pub enum CanonicalVarKind<'tcx> {
/// Some kind of const inference variable.
Const(ty::UniverseIndex, Ty<'tcx>),
+ /// Effect variable `'?E`.
+ Effect,
+
/// A "placeholder" that represents "any const".
PlaceholderConst(ty::PlaceholderConst<'tcx>, Ty<'tcx>),
}
@@ -229,11 +246,11 @@ pub enum CanonicalVarKind<'tcx> {
impl<'tcx> CanonicalVarKind<'tcx> {
pub fn universe(self) -> ty::UniverseIndex {
match self {
- CanonicalVarKind::Ty(kind) => match kind {
- CanonicalTyVarKind::General(ui) => ui,
- CanonicalTyVarKind::Float | CanonicalTyVarKind::Int => ty::UniverseIndex::ROOT,
- },
-
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) => ui,
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Float | CanonicalTyVarKind::Int) => {
+ ty::UniverseIndex::ROOT
+ }
+ CanonicalVarKind::Effect => ty::UniverseIndex::ROOT,
CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe,
CanonicalVarKind::Region(ui) => ui,
CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe,
@@ -248,15 +265,14 @@ impl<'tcx> CanonicalVarKind<'tcx> {
/// the updated universe is not the root.
pub fn with_updated_universe(self, ui: ty::UniverseIndex) -> CanonicalVarKind<'tcx> {
match self {
- CanonicalVarKind::Ty(kind) => match kind {
- CanonicalTyVarKind::General(_) => {
- CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui))
- }
- CanonicalTyVarKind::Int | CanonicalTyVarKind::Float => {
- assert_eq!(ui, ty::UniverseIndex::ROOT);
- CanonicalVarKind::Ty(kind)
- }
- },
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(_)) => {
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui))
+ }
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float)
+ | CanonicalVarKind::Effect => {
+ assert_eq!(ui, ty::UniverseIndex::ROOT);
+ self
+ }
CanonicalVarKind::PlaceholderTy(placeholder) => {
CanonicalVarKind::PlaceholderTy(ty::Placeholder { universe: ui, ..placeholder })
}
@@ -295,7 +311,7 @@ pub enum CanonicalTyVarKind {
/// After we execute a query with a canonicalized key, we get back a
/// `Canonical<QueryResponse<..>>`. You can use
/// `instantiate_query_result` to access the data in this result.
-#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct QueryResponse<'tcx, R> {
pub var_values: CanonicalVarValues<'tcx>,
pub region_constraints: QueryRegionConstraints<'tcx>,
@@ -310,7 +326,7 @@ pub struct QueryResponse<'tcx, R> {
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct QueryRegionConstraints<'tcx> {
pub outlives: Vec<QueryOutlivesConstraint<'tcx>>,
pub member_constraints: Vec<MemberConstraint<'tcx>>,
@@ -416,7 +432,7 @@ impl<'tcx, V> Canonical<'tcx, V> {
pub type QueryOutlivesConstraint<'tcx> =
(ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>, ConstraintCategory<'tcx>);
-TrivialTypeTraversalAndLiftImpls! {
+TrivialTypeTraversalImpls! {
crate::infer::canonical::Certainty,
crate::infer::canonical::CanonicalTyVarKind,
}
@@ -439,10 +455,17 @@ impl<'tcx> CanonicalVarValues<'tcx> {
CanonicalVarKind::Region(_) | CanonicalVarKind::PlaceholderRegion(_) => {
let br = ty::BoundRegion {
var: ty::BoundVar::from_usize(i),
- kind: ty::BrAnon(None),
+ kind: ty::BrAnon,
};
ty::Region::new_late_bound(tcx, ty::INNERMOST, br).into()
}
+ CanonicalVarKind::Effect => ty::Const::new_bound(
+ tcx,
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(i),
+ tcx.types.bool,
+ )
+ .into(),
CanonicalVarKind::Const(_, ty)
| CanonicalVarKind::PlaceholderConst(_, ty) => ty::Const::new_bound(
tcx,
diff --git a/compiler/rustc_middle/src/infer/mod.rs b/compiler/rustc_middle/src/infer/mod.rs
index 493bb8a68..1384611e1 100644
--- a/compiler/rustc_middle/src/infer/mod.rs
+++ b/compiler/rustc_middle/src/infer/mod.rs
@@ -13,7 +13,7 @@ use rustc_span::Span;
/// R0 member of [O1..On]
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct MemberConstraint<'tcx> {
/// The `DefId` and args of the opaque type causing this constraint.
/// Used for error reporting.
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
index 85fb9214d..7ca964759 100644
--- a/compiler/rustc_middle/src/infer/unify_key.rs
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -188,3 +188,53 @@ impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
})
}
}
+
+/// Values for the effect inference variable.
+#[derive(Clone, Copy, Debug)]
+pub enum EffectVarValue<'tcx> {
+ /// The host effect is on, enabling access to syscalls, filesystem access, etc.
+ Host,
+ /// The host effect is off. Execution is restricted to const operations only.
+ NoHost,
+ Const(ty::Const<'tcx>),
+}
+
+impl<'tcx> EffectVarValue<'tcx> {
+ pub fn as_const(self, tcx: TyCtxt<'tcx>) -> ty::Const<'tcx> {
+ match self {
+ EffectVarValue::Host => tcx.consts.true_,
+ EffectVarValue::NoHost => tcx.consts.false_,
+ EffectVarValue::Const(c) => c,
+ }
+ }
+}
+
+impl<'tcx> UnifyValue for EffectVarValue<'tcx> {
+ type Error = (EffectVarValue<'tcx>, EffectVarValue<'tcx>);
+ fn unify_values(value1: &Self, value2: &Self) -> Result<Self, Self::Error> {
+ match (value1, value2) {
+ (EffectVarValue::Host, EffectVarValue::Host) => Ok(EffectVarValue::Host),
+ (EffectVarValue::NoHost, EffectVarValue::NoHost) => Ok(EffectVarValue::NoHost),
+ (EffectVarValue::NoHost | EffectVarValue::Host, _)
+ | (_, EffectVarValue::NoHost | EffectVarValue::Host) => Err((*value1, *value2)),
+ (EffectVarValue::Const(_), EffectVarValue::Const(_)) => {
+ bug!("equating two const variables, both of which have known values")
+ }
+ }
+ }
+}
+
+impl<'tcx> UnifyKey for ty::EffectVid<'tcx> {
+ type Value = Option<EffectVarValue<'tcx>>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.index
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ ty::EffectVid { index: i, phantom: PhantomData }
+ }
+ fn tag() -> &'static str {
+ "EffectVid"
+ }
+}
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index d3fc1b285..fe4fc3761 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -63,7 +63,7 @@
#![feature(macro_metavar_expr)]
#![recursion_limit = "512"]
#![allow(rustc::potential_query_instability)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate bitflags;
@@ -89,6 +89,7 @@ mod macros;
pub mod arena;
pub mod error;
pub mod hir;
+pub mod hooks;
pub mod infer;
pub mod lint;
pub mod metadata;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index f62e40669..59849e8eb 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -225,6 +225,9 @@ pub fn explain_lint_level_source(
err.note_once(format!(
"`{flag} {hyphen_case_lint_name}` implied by `{flag} {hyphen_case_flag_val}`"
));
+ err.help_once(format!(
+ "to override `{flag} {hyphen_case_flag_val}` add `#[allow({name})]`"
+ ));
}
}
LintLevelSource::Node { name: lint_attr_name, span, reason, .. } => {
@@ -311,7 +314,10 @@ pub fn struct_lint_level(
// Default allow lints trigger too often for testing.
sess.opts.unstable_opts.future_incompat_test && lint.default_level != Level::Allow,
|incompat| {
- matches!(incompat.reason, FutureIncompatibilityReason::FutureReleaseErrorReportNow)
+ matches!(
+ incompat.reason,
+ FutureIncompatibilityReason::FutureReleaseErrorReportInDeps
+ )
},
);
@@ -401,8 +407,8 @@ pub fn struct_lint_level(
if let Some(future_incompatible) = future_incompatible {
let explanation = match future_incompatible.reason {
- FutureIncompatibilityReason::FutureReleaseError
- | FutureIncompatibilityReason::FutureReleaseErrorReportNow => {
+ FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps
+ | FutureIncompatibilityReason::FutureReleaseErrorReportInDeps => {
"this was previously accepted by the compiler but is being phased out; \
it will become a hard error in a future release!"
.to_owned()
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
index fca16d8e5..c1884bb80 100644
--- a/compiler/rustc_middle/src/macros.rs
+++ b/compiler/rustc_middle/src/macros.rs
@@ -42,7 +42,7 @@ macro_rules! span_bug {
// the impls for you.
#[macro_export]
-macro_rules! CloneLiftImpls {
+macro_rules! TrivialLiftImpls {
($($ty:ty),+ $(,)?) => {
$(
impl<'tcx> $crate::ty::Lift<'tcx> for $ty {
@@ -96,6 +96,6 @@ macro_rules! TrivialTypeTraversalImpls {
macro_rules! TrivialTypeTraversalAndLiftImpls {
($($t:tt)*) => {
TrivialTypeTraversalImpls! { $($t)* }
- CloneLiftImpls! { $($t)* }
+ TrivialLiftImpls! { $($t)* }
}
}
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
index 02fd6ed7b..4e5725876 100644
--- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -87,7 +87,7 @@ bitflags! {
/// #[cmse_nonsecure_entry]: with a TrustZone-M extension, declare a
/// function as an entry function from Non-Secure code.
const CMSE_NONSECURE_ENTRY = 1 << 14;
- /// `#[no_coverage]`: indicates that the function should be ignored by
+ /// `#[coverage(off)]`: indicates that the function should be ignored by
/// the MIR `InstrumentCoverage` pass and not added to the coverage map
/// during codegen.
const NO_COVERAGE = 1 << 15;
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
index 0ad17e819..3ecd5b9cd 100644
--- a/compiler/rustc_middle/src/mir/basic_blocks.rs
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -5,7 +5,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph;
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_data_structures::sync::OnceCell;
+use rustc_data_structures::sync::OnceLock;
use rustc_index::{IndexSlice, IndexVec};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use smallvec::SmallVec;
@@ -23,11 +23,11 @@ pub type SwitchSources = FxHashMap<(BasicBlock, BasicBlock), SmallVec<[Option<u1
#[derive(Clone, Default, Debug)]
struct Cache {
- predecessors: OnceCell<Predecessors>,
- switch_sources: OnceCell<SwitchSources>,
- is_cyclic: OnceCell<bool>,
- reverse_postorder: OnceCell<Vec<BasicBlock>>,
- dominators: OnceCell<Dominators<BasicBlock>>,
+ predecessors: OnceLock<Predecessors>,
+ switch_sources: OnceLock<SwitchSources>,
+ is_cyclic: OnceLock<bool>,
+ reverse_postorder: OnceLock<Vec<BasicBlock>>,
+ dominators: OnceLock<Dominators<BasicBlock>>,
}
impl<'tcx> BasicBlocks<'tcx> {
@@ -63,11 +63,14 @@ impl<'tcx> BasicBlocks<'tcx> {
}
/// Returns basic blocks in a reverse postorder.
+ ///
+ /// See [`traversal::reverse_postorder`]'s docs to learn what is preorder traversal.
+ ///
+ /// [`traversal::reverse_postorder`]: crate::mir::traversal::reverse_postorder
#[inline]
pub fn reverse_postorder(&self) -> &[BasicBlock] {
self.cache.reverse_postorder.get_or_init(|| {
- let mut rpo: Vec<_> =
- Postorder::new(&self.basic_blocks, START_BLOCK).map(|(bb, _)| bb).collect();
+ let mut rpo: Vec<_> = Postorder::new(&self.basic_blocks, START_BLOCK).collect();
rpo.reverse();
rpo
})
@@ -178,7 +181,7 @@ impl<'tcx> graph::WithPredecessors for BasicBlocks<'tcx> {
}
}
-TrivialTypeTraversalAndLiftImpls! { Cache }
+TrivialTypeTraversalImpls! { Cache }
impl<S: Encoder> Encodable<S> for Cache {
#[inline]
diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs
new file mode 100644
index 000000000..7c8a57b84
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/consts.rs
@@ -0,0 +1,522 @@
+use std::fmt::{self, Debug, Display, Formatter};
+
+use rustc_hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::{self as hir};
+use rustc_span::Span;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use crate::mir::interpret::{alloc_range, AllocId, ConstAllocation, ErrorHandled, Scalar};
+use crate::mir::{pretty_print_const_value, Promoted};
+use crate::ty::ScalarInt;
+use crate::ty::{self, print::pretty_print_const, List, Ty, TyCtxt};
+use crate::ty::{GenericArgs, GenericArgsRef};
+
+///////////////////////////////////////////////////////////////////////////
+/// Evaluated Constants
+
+/// Represents the result of const evaluation via the `eval_to_allocation` query.
+/// Not to be confused with `ConstAllocation`, which directly refers to the underlying data!
+/// Here we indirect via an `AllocId`.
+#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
+pub struct ConstAlloc<'tcx> {
+ /// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
+ /// (so you can use `AllocMap::unwrap_memory`).
+ pub alloc_id: AllocId,
+ pub ty: Ty<'tcx>,
+}
+
+/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
+/// array length computations, enum discriminants and the pattern matching logic.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable, Lift)]
+pub enum ConstValue<'tcx> {
+ /// Used for types with `layout::abi::Scalar` ABI.
+ ///
+ /// Not using the enum `Value` to encode that this must not be `Uninit`.
+ Scalar(Scalar),
+
+ /// Only for ZSTs.
+ ZeroSized,
+
+ /// Used for references to unsized types with slice tail.
+ ///
+ /// This is worth an optimized representation since Rust has literals of type `&str` and
+ /// `&[u8]`. Not having to indirect those through an `AllocId` (or two, if we used `Indirect`)
+ /// has shown measurable performance improvements on stress tests. We then reuse this
+ /// optimization for slice-tail types more generally during valtree-to-constval conversion.
+ Slice {
+ /// The allocation storing the slice contents.
+ /// This always points to the beginning of the allocation.
+ data: ConstAllocation<'tcx>,
+ /// The metadata field of the reference.
+ /// This is a "target usize", so we use `u64` as in the interpreter.
+ meta: u64,
+ },
+
+ /// A value not representable by the other variants; needs to be stored in-memory.
+ ///
+ /// Must *not* be used for scalars or ZST, but having `&str` or other slices in this variant is fine.
+ Indirect {
+ /// The backing memory of the value. May contain more memory than needed for just the value
+ /// if this points into some other larger ConstValue.
+ ///
+ /// We use an `AllocId` here instead of a `ConstAllocation<'tcx>` to make sure that when a
+ /// raw constant (which is basically just an `AllocId`) is turned into a `ConstValue` and
+ /// back, we can preserve the original `AllocId`.
+ alloc_id: AllocId,
+ /// Offset into `alloc`
+ offset: Size,
+ },
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstValue<'_>, 24);
+
+impl<'tcx> ConstValue<'tcx> {
+ #[inline]
+ pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
+ match *self {
+ ConstValue::Indirect { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
+ ConstValue::Scalar(val) => Some(val),
+ }
+ }
+
+ pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
+ self.try_to_scalar()?.try_to_int().ok()
+ }
+
+ pub fn try_to_bits(&self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ pub fn try_to_bool(&self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
+ }
+
+ pub fn try_to_bits_for_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ self.try_to_bits(size)
+ }
+
+ pub fn from_bool(b: bool) -> Self {
+ ConstValue::Scalar(Scalar::from_bool(b))
+ }
+
+ pub fn from_u64(i: u64) -> Self {
+ ConstValue::Scalar(Scalar::from_u64(i))
+ }
+
+ pub fn from_u128(i: u128) -> Self {
+ ConstValue::Scalar(Scalar::from_u128(i))
+ }
+
+ pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+ ConstValue::Scalar(Scalar::from_target_usize(i, cx))
+ }
+
+ /// Must only be called on constants of type `&str` or `&[u8]`!
+ pub fn try_get_slice_bytes_for_diagnostics(&self, tcx: TyCtxt<'tcx>) -> Option<&'tcx [u8]> {
+ let (data, start, end) = match self {
+ ConstValue::Scalar(_) | ConstValue::ZeroSized => {
+ bug!("`try_get_slice_bytes` on non-slice constant")
+ }
+ &ConstValue::Slice { data, meta } => (data, 0, meta),
+ &ConstValue::Indirect { alloc_id, offset } => {
+ // The reference itself is stored behind an indirection.
+ // Load the reference, and then load the actual slice contents.
+ let a = tcx.global_alloc(alloc_id).unwrap_memory().inner();
+ let ptr_size = tcx.data_layout.pointer_size;
+ if a.size() < offset + 2 * ptr_size {
+ // (partially) dangling reference
+ return None;
+ }
+ // Read the wide pointer components.
+ let ptr = a
+ .read_scalar(
+ &tcx,
+ alloc_range(offset, ptr_size),
+ /* read_provenance */ true,
+ )
+ .ok()?;
+ let ptr = ptr.to_pointer(&tcx).ok()?;
+ let len = a
+ .read_scalar(
+ &tcx,
+ alloc_range(offset + ptr_size, ptr_size),
+ /* read_provenance */ false,
+ )
+ .ok()?;
+ let len = len.to_target_usize(&tcx).ok()?;
+ if len == 0 {
+ return Some(&[]);
+ }
+ // Non-empty slice, must have memory. We know this is a relative pointer.
+ let (inner_alloc_id, offset) = ptr.into_parts();
+ let data = tcx.global_alloc(inner_alloc_id?).unwrap_memory();
+ (data, offset.bytes(), offset.bytes() + len)
+ }
+ };
+
+ // This is for diagnostics only, so we are okay to use `inspect_with_uninit_and_ptr_outside_interpreter`.
+ let start = start.try_into().unwrap();
+ let end = end.try_into().unwrap();
+ Some(data.inner().inspect_with_uninit_and_ptr_outside_interpreter(start..end))
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Constants
+
+#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum Const<'tcx> {
+ /// This constant came from the type system.
+ ///
+ /// Any way of turning `ty::Const` into `ConstValue` should go through `valtree_to_const_val`;
+ /// this ensures that we consistently produce "clean" values without data in the padding or
+ /// anything like that.
+ Ty(ty::Const<'tcx>),
+
+ /// An unevaluated mir constant which is not part of the type system.
+ ///
+ /// Note that `Ty(ty::ConstKind::Unevaluated)` and this variant are *not* identical! `Ty` will
+ /// always flow through a valtree, so all data not captured in the valtree is lost. This variant
+ /// directly uses the evaluated result of the given constant, including e.g. data stored in
+ /// padding.
+ Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>),
+
+ /// This constant cannot go back into the type system, as it represents
+ /// something the type system cannot handle (e.g. pointers).
+ Val(ConstValue<'tcx>, Ty<'tcx>),
+}
+
+impl<'tcx> Const<'tcx> {
+ #[inline(always)]
+ pub fn ty(&self) -> Ty<'tcx> {
+ match self {
+ Const::Ty(c) => c.ty(),
+ Const::Val(_, ty) | Const::Unevaluated(_, ty) => *ty,
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar> {
+ match self {
+ Const::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => match valtree {
+ ty::ValTree::Leaf(scalar_int) => Some(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => None,
+ },
+ _ => None,
+ },
+ Const::Val(val, _) => val.try_to_scalar(),
+ Const::Unevaluated(..) => None,
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ self.try_to_scalar()?.try_to_int().ok()
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn eval(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Option<Span>,
+ ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+ match self {
+ Const::Ty(c) => {
+ // We want to consistently have a "clean" value for type system constants (i.e., no
+ // data hidden in the padding), so we always go through a valtree here.
+ let val = c.eval(tcx, param_env, span)?;
+ Ok(tcx.valtree_to_const_val((self.ty(), val)))
+ }
+ Const::Unevaluated(uneval, _) => {
+ // FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
+ tcx.const_eval_resolve(param_env, uneval, span)
+ }
+ Const::Val(val, _) => Ok(val),
+ }
+ }
+
+ /// Normalizes the constant to a value or an error if possible.
+ #[inline]
+ pub fn normalize(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ match self.eval(tcx, param_env, None) {
+ Ok(val) => Self::Val(val, self.ty()),
+ Err(ErrorHandled::Reported(guar, _span)) => {
+ Self::Ty(ty::Const::new_error(tcx, guar.into(), self.ty()))
+ }
+ Err(ErrorHandled::TooGeneric(_span)) => self,
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_scalar(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Option<Scalar> {
+ self.eval(tcx, param_env, None).ok()?.try_to_scalar()
+ }
+
+ #[inline]
+ pub fn try_eval_scalar_int(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Option<ScalarInt> {
+ self.try_eval_scalar(tcx, param_env)?.try_to_int().ok()
+ }
+
+ #[inline]
+ pub fn try_eval_bits(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<u128> {
+ let int = self.try_eval_scalar_int(tcx, param_env)?;
+ let size =
+ tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size;
+ int.to_bits(size).ok()
+ }
+
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ #[inline]
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", self.ty(), self))
+ }
+
+ #[inline]
+ pub fn try_eval_target_usize(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Option<u64> {
+ self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok()
+ }
+
+ #[inline]
+ /// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
+ pub fn eval_target_usize(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> u64 {
+ self.try_eval_target_usize(tcx, param_env)
+ .unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
+ }
+
+ #[inline]
+ pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
+ self.try_eval_scalar_int(tcx, param_env)?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn from_value(val: ConstValue<'tcx>, ty: Ty<'tcx>) -> Self {
+ Self::Val(val, ty)
+ }
+
+ pub fn from_bits(
+ tcx: TyCtxt<'tcx>,
+ bits: u128,
+ param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ ) -> Self {
+ let size = tcx
+ .layout_of(param_env_ty)
+ .unwrap_or_else(|e| {
+ bug!("could not compute layout for {:?}: {:?}", param_env_ty.value, e)
+ })
+ .size;
+ let cv = ConstValue::Scalar(Scalar::from_uint(bits, size));
+
+ Self::Val(cv, param_env_ty.value)
+ }
+
+ #[inline]
+ pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
+ let cv = ConstValue::from_bool(v);
+ Self::Val(cv, tcx.types.bool)
+ }
+
+ #[inline]
+ pub fn zero_sized(ty: Ty<'tcx>) -> Self {
+ let cv = ConstValue::ZeroSized;
+ Self::Val(cv, ty)
+ }
+
+ pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
+ let ty = tcx.types.usize;
+ Self::from_bits(tcx, n as u128, ty::ParamEnv::empty().and(ty))
+ }
+
+ #[inline]
+ pub fn from_scalar(_tcx: TyCtxt<'tcx>, s: Scalar, ty: Ty<'tcx>) -> Self {
+ let val = ConstValue::Scalar(s);
+ Self::Val(val, ty)
+ }
+
+ /// Literals are converted to `Const::Val`; const generic parameters are eagerly
+ /// converted to a constant; everything else becomes `Unevaluated`.
+ #[instrument(skip(tcx), level = "debug", ret)]
+ pub fn from_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def: LocalDefId,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let body_id = match tcx.hir().get_by_def_id(def) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => {
+ span_bug!(tcx.def_span(def), "from_anon_const can only process anonymous constants")
+ }
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+ debug!(?expr);
+
+ // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+ // currently have to be wrapped in curly brackets, so this case needs to be handled specially.
+ let expr = match &expr.kind {
+ hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+ block.expr.as_ref().unwrap()
+ }
+ _ => expr,
+ };
+ debug!("expr.kind: {:?}", expr.kind);
+
+ let ty = tcx.type_of(def).instantiate_identity();
+ debug!(?ty);
+
+ // FIXME(const_generics): We currently have to special-case parameters because `min_const_generics`
+ // does not provide the parent's generics to anonymous constants. However, we still allow generic
+ // const parameters by themselves, e.g. `N`. These constants would cause an ICE if we ever tried to
+ // substitute the generic parameters in their bodies.
+ //
+ // While this doesn't happen in practice, as these constants are always used as `ty::ConstKind::Param`,
+ // it would cause issues if we removed that special case and tried to evaluate the constant instead.
+ use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+ match expr.kind {
+ ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let item_def_id = tcx.parent(def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.item_name(def_id);
+ let ty_const = ty::Const::new_param(tcx, ty::ParamConst::new(index, name), ty);
+ debug!(?ty_const);
+
+ return Self::Ty(ty_const);
+ }
+ _ => {}
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def);
+ let parent_args = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
+ && let Some(parent_did) = parent_hir_id.as_owner()
+ {
+ GenericArgs::identity_for_item(tcx, parent_did)
+ } else {
+ List::empty()
+ };
+ debug!(?parent_args);
+
+ let did = def.to_def_id();
+ let child_args = GenericArgs::identity_for_item(tcx, did);
+ let args = tcx.mk_args_from_iter(parent_args.into_iter().chain(child_args.into_iter()));
+ debug!(?args);
+
+ let span = tcx.def_span(def);
+ let uneval = UnevaluatedConst::new(did, args);
+ debug!(?span, ?param_env);
+
+ match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
+ Ok(val) => {
+ debug!("evaluated const value");
+ Self::Val(val, ty)
+ }
+ Err(_) => {
+ debug!("error encountered during evaluation");
+ // The error was already handled in `const_eval_resolve`. Here we just create a
+ // new unevaluated const and error hard later in codegen.
+ Self::Unevaluated(
+ UnevaluatedConst {
+ def: did,
+ args: GenericArgs::identity_for_item(tcx, did),
+ promoted: None,
+ },
+ ty,
+ )
+ }
+ }
+ }
+
+ pub fn from_ty_const(c: ty::Const<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ match c.kind() {
+ ty::ConstKind::Value(valtree) => {
+ // Make sure that if `c` is normalized, then the return value is normalized.
+ let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
+ Self::Val(const_val, c.ty())
+ }
+ _ => Self::Ty(c),
+ }
+ }
+}
+
+/// An unevaluated (potentially generic) constant used in MIR.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UnevaluatedConst<'tcx> {
+ pub def: DefId,
+ pub args: GenericArgsRef<'tcx>,
+ pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ #[inline]
+ pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
+ assert_eq!(self.promoted, None);
+ ty::UnevaluatedConst { def: self.def, args: self.args }
+ }
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ #[inline]
+ pub fn new(def: DefId, args: GenericArgsRef<'tcx>) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, args, promoted: Default::default() }
+ }
+
+ #[inline]
+ pub fn from_instance(instance: ty::Instance<'tcx>) -> Self {
+ UnevaluatedConst::new(instance.def_id(), instance.args)
+ }
+}
+
+impl<'tcx> Display for Const<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match *self {
+ Const::Ty(c) => pretty_print_const(c, fmt, true),
+ Const::Val(val, ty) => pretty_print_const_value(val, ty, fmt),
+ // FIXME(valtrees): Correctly print mir constants.
+ Const::Unevaluated(..) => {
+ fmt.write_str("_")?;
+ Ok(())
+ }
+ }
+ }
+}
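An illustrative sketch, not part of the upstream patch, showing how the new `mir::Const` constructors and accessors added above fit together; it assumes a `TyCtxt<'tcx>` is in scope and the import paths are assumptions made for the example.

use rustc_middle::mir::Const;
use rustc_middle::ty::TyCtxt;

fn demo_mir_const<'tcx>(tcx: TyCtxt<'tcx>) {
    // A monomorphic boolean constant is stored directly as `Const::Val`.
    let c = Const::from_bool(tcx, true);
    assert_eq!(c.ty(), tcx.types.bool);
    // `try_to_bool` goes through `try_to_scalar`/`try_to_scalar_int`, so it succeeds
    // without any evaluation; an `Unevaluated` constant would return `None` until
    // `eval` (or `normalize`) has been called.
    assert_eq!(c.try_to_bool(), Some(true));
    // `from_usize` builds a target-`usize` constant in an empty `ParamEnv`.
    let n = Const::from_usize(tcx, 4);
    assert!(n.try_to_scalar_int().is_some());
}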
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
index 1efb54bdb..9ef673922 100644
--- a/compiler/rustc_middle/src/mir/coverage.rs
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -45,16 +45,6 @@ impl ExpressionId {
}
}
-rustc_index::newtype_index! {
- /// MappedExpressionIndex values ascend from zero, and are recalculated indexes based on their
- /// array position in the LLVM coverage map "Expressions" array, which is assembled during the
- /// "mapgen" process. They cannot be computed algorithmically, from the other `newtype_index`s.
- #[derive(HashStable)]
- #[max = 0xFFFF_FFFF]
- #[debug_format = "MappedExpressionIndex({})"]
- pub struct MappedExpressionIndex {}
-}
-
/// Operand of a coverage-counter expression.
///
/// Operands can be a constant zero value, an actual coverage counter, or another
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index e6ef5a41e..bc464aca5 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -1,8 +1,9 @@
-use super::{AllocId, AllocRange, ConstAlloc, Pointer, Scalar};
+use super::{AllocId, AllocRange, Pointer, Scalar};
-use crate::mir::interpret::ConstValue;
+use crate::error;
+use crate::mir::{ConstAlloc, ConstValue};
use crate::query::TyCtxtAt;
-use crate::ty::{layout, tls, Ty, ValTree};
+use crate::ty::{layout, tls, Ty, TyCtxt, ValTree};
use rustc_data_structures::sync::Lock;
use rustc_errors::{
@@ -11,7 +12,7 @@ use rustc_errors::{
};
use rustc_macros::HashStable;
use rustc_session::CtfeBacktrace;
-use rustc_span::def_id::DefId;
+use rustc_span::{def_id::DefId, Span, DUMMY_SP};
use rustc_target::abi::{call, Align, Size, VariantIdx, WrappingRange};
use std::borrow::Cow;
@@ -21,16 +22,51 @@ use std::{any::Any, backtrace::Backtrace, fmt};
pub enum ErrorHandled {
/// Already reported an error for this evaluation, and the compilation is
/// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
- Reported(ReportedErrorInfo),
+ Reported(ReportedErrorInfo, Span),
/// Don't emit an error, the evaluation failed because the MIR was generic
/// and the args didn't fully monomorphize it.
- TooGeneric,
+ TooGeneric(Span),
}
impl From<ErrorGuaranteed> for ErrorHandled {
#[inline]
fn from(error: ErrorGuaranteed) -> ErrorHandled {
- ErrorHandled::Reported(error.into())
+ ErrorHandled::Reported(error.into(), DUMMY_SP)
+ }
+}
+
+impl ErrorHandled {
+ pub fn with_span(self, span: Span) -> Self {
+ match self {
+ ErrorHandled::Reported(err, _span) => ErrorHandled::Reported(err, span),
+ ErrorHandled::TooGeneric(_span) => ErrorHandled::TooGeneric(span),
+ }
+ }
+
+ pub fn emit_err(&self, tcx: TyCtxt<'_>) -> ErrorGuaranteed {
+ match self {
+ &ErrorHandled::Reported(err, span) => {
+ if !err.is_tainted_by_errors && !span.is_dummy() {
+ tcx.sess.emit_err(error::ErroneousConstant { span });
+ }
+ err.error
+ }
+ &ErrorHandled::TooGeneric(span) => tcx.sess.delay_span_bug(
+ span,
+ "encountered TooGeneric error when monomorphic data was expected",
+ ),
+ }
+ }
+
+ pub fn emit_note(&self, tcx: TyCtxt<'_>) {
+ match self {
+ &ErrorHandled::Reported(err, span) => {
+ if !err.is_tainted_by_errors && !span.is_dummy() {
+ tcx.sess.emit_note(error::ErroneousConstant { span });
+ }
+ }
+ &ErrorHandled::TooGeneric(_) => {}
+ }
}
}
@@ -45,12 +81,6 @@ impl ReportedErrorInfo {
pub fn tainted_by_errors(error: ErrorGuaranteed) -> ReportedErrorInfo {
ReportedErrorInfo { is_tainted_by_errors: true, error }
}
-
- /// Returns true if evaluation failed because MIR was tainted by errors.
- #[inline]
- pub fn is_tainted_by_errors(self) -> bool {
- self.is_tainted_by_errors
- }
}
impl From<ErrorGuaranteed> for ReportedErrorInfo {
@@ -67,10 +97,12 @@ impl Into<ErrorGuaranteed> for ReportedErrorInfo {
}
}
-TrivialTypeTraversalAndLiftImpls! { ErrorHandled }
+TrivialTypeTraversalImpls! { ErrorHandled }
pub type EvalToAllocationRawResult<'tcx> = Result<ConstAlloc<'tcx>, ErrorHandled>;
pub type EvalToConstValueResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
+/// `Ok(None)` indicates the constant was fine, but the valtree couldn't be constructed.
+/// This is needed in `thir::pattern::lower_inline_const`.
pub type EvalToValTreeResult<'tcx> = Result<Option<ValTree<'tcx>>, ErrorHandled>;
pub fn struct_error<'tcx>(
@@ -160,6 +192,16 @@ impl From<ErrorGuaranteed> for InterpErrorInfo<'_> {
}
}
+impl From<ErrorHandled> for InterpErrorInfo<'_> {
+ fn from(err: ErrorHandled) -> Self {
+ InterpError::InvalidProgram(match err {
+ ErrorHandled::Reported(r, _span) => InvalidProgramInfo::AlreadyReported(r),
+ ErrorHandled::TooGeneric(_span) => InvalidProgramInfo::TooGeneric,
+ })
+ .into()
+ }
+}
+
impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
fn from(kind: InterpError<'tcx>) -> Self {
InterpErrorInfo(Box::new(InterpErrorInfoInner {
@@ -255,9 +297,16 @@ impl_into_diagnostic_arg_through_debug! {
/// Error information for when the program caused Undefined Behavior.
#[derive(Debug)]
-pub enum UndefinedBehaviorInfo<'a> {
+pub enum UndefinedBehaviorInfo<'tcx> {
/// Free-form case. Only for errors that are never caught! Used by miri
Ub(String),
+ // FIXME(fee1-dead) these should all be actual variants of the enum instead of dynamically
+ // dispatched
+ /// A custom (free-form) fluent-translated error, created by `err_ub_custom!`.
+ Custom(crate::error::CustomSubdiagnostic<'tcx>),
+ /// Validation error.
+ ValidationError(ValidationErrorInfo<'tcx>),
+
/// Unreachable code was executed.
Unreachable,
/// A slice/array index projection went out-of-bounds.
@@ -319,12 +368,10 @@ pub enum UndefinedBehaviorInfo<'a> {
UninhabitedEnumVariantWritten(VariantIdx),
/// An uninhabited enum variant is projected.
UninhabitedEnumVariantRead(VariantIdx),
- /// Validation error.
- ValidationError(ValidationErrorInfo<'a>),
- // FIXME(fee1-dead) these should all be actual variants of the enum instead of dynamically
- // dispatched
- /// A custom (free-form) error, created by `err_ub_custom!`.
- Custom(crate::error::CustomSubdiagnostic<'a>),
+ /// ABI-incompatible argument types.
+ AbiMismatchArgument { caller_ty: Ty<'tcx>, callee_ty: Ty<'tcx> },
+ /// ABI-incompatible return types.
+ AbiMismatchReturn { caller_ty: Ty<'tcx>, callee_ty: Ty<'tcx> },
}
#[derive(Debug, Clone, Copy)]
@@ -415,6 +462,8 @@ pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught!
// FIXME still use translatable diagnostics
Unsupported(String),
+ /// Unsized local variables.
+ UnsizedLocal,
//
// The variants below are only reachable from CTFE/const prop; miri will never emit them.
//
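An illustrative sketch, not from the patch, of how a caller can use the span-carrying `ErrorHandled` introduced above; the wrapper function, its signature, and its names are assumptions made purely for the example.

use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::mir::ConstValue;
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;

fn report_eval_result<'tcx>(
    tcx: TyCtxt<'tcx>,
    res: Result<ConstValue<'tcx>, ErrorHandled>,
    use_site: Span,
) {
    match res {
        Ok(_val) => { /* use the evaluated constant */ }
        Err(err) => {
            // The queries attach `DUMMY_SP` when the use site shouldn't matter;
            // callers can swap in their own span before reporting.
            err.with_span(use_site).emit_note(tcx);
        }
    }
}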
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
index 3543158bf..d21f82f04 100644
--- a/compiler/rustc_middle/src/mir/interpret/mod.rs
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -149,7 +149,7 @@ pub use self::error::{
UnsupportedOpInfo, ValidationErrorInfo, ValidationErrorKind,
};
-pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
+pub use self::value::Scalar;
pub use self::allocation::{
alloc_range, AllocBytes, AllocError, AllocRange, AllocResult, Allocation, ConstAllocation,
@@ -162,7 +162,7 @@ pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
/// - A constant
/// - A static
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct GlobalId<'tcx> {
/// For a constant or static, the `Instance` of the item itself.
/// For a promoted global, the `Instance` of the function they belong to.
@@ -389,7 +389,7 @@ impl<'s> AllocDecodingSession<'s> {
trace!("creating fn alloc ID");
let instance = ty::Instance::decode(decoder);
trace!("decoded fn alloc instance: {:?}", instance);
- let alloc_id = decoder.interner().create_fn_alloc(instance);
+ let alloc_id = decoder.interner().reserve_and_set_fn_alloc(instance);
alloc_id
}
AllocDiscriminant::VTable => {
@@ -399,7 +399,8 @@ impl<'s> AllocDecodingSession<'s> {
let poly_trait_ref =
<Option<ty::PolyExistentialTraitRef<'_>> as Decodable<D>>::decode(decoder);
trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}");
- let alloc_id = decoder.interner().create_vtable_alloc(ty, poly_trait_ref);
+ let alloc_id =
+ decoder.interner().reserve_and_set_vtable_alloc(ty, poly_trait_ref);
alloc_id
}
AllocDiscriminant::Static => {
@@ -407,7 +408,7 @@ impl<'s> AllocDecodingSession<'s> {
trace!("creating extern static alloc ID");
let did = <DefId as Decodable<D>>::decode(decoder);
trace!("decoded static def-ID: {:?}", did);
- let alloc_id = decoder.interner().create_static_alloc(did);
+ let alloc_id = decoder.interner().reserve_and_set_static_alloc(did);
alloc_id
}
}
@@ -544,13 +545,13 @@ impl<'tcx> TyCtxt<'tcx> {
/// Generates an `AllocId` for a static or returns a cached one in case this function has been
/// called on the same static before.
- pub fn create_static_alloc(self, static_id: DefId) -> AllocId {
+ pub fn reserve_and_set_static_alloc(self, static_id: DefId) -> AllocId {
self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
}
/// Generates an `AllocId` for a function. Depending on the function type,
/// this might get deduplicated or assigned a new ID each time.
- pub fn create_fn_alloc(self, instance: Instance<'tcx>) -> AllocId {
+ pub fn reserve_and_set_fn_alloc(self, instance: Instance<'tcx>) -> AllocId {
// Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
// by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
// duplicated across crates.
@@ -575,7 +576,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
/// Generates an `AllocId` for a (symbolic, not-reified) vtable. Will get deduplicated.
- pub fn create_vtable_alloc(
+ pub fn reserve_and_set_vtable_alloc(
self,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
@@ -588,7 +589,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Statics with identical content will still point to the same `Allocation`, i.e.,
/// their data will be deduplicated through `Allocation` interning -- but they
/// are different places in memory and as such need different IDs.
- pub fn create_memory_alloc(self, mem: ConstAllocation<'tcx>) -> AllocId {
+ pub fn reserve_and_set_memory_alloc(self, mem: ConstAllocation<'tcx>) -> AllocId {
let id = self.reserve_alloc_id();
self.set_alloc_id_memory(id, mem);
id
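An illustrative sketch, not from the patch, of the renamed `reserve_and_set_*` constructors above; the wrapper function and its arguments are assumptions made purely for the example.

use rustc_hir::def_id::DefId;
use rustc_middle::mir::interpret::AllocId;
use rustc_middle::ty::{Instance, TyCtxt};

fn demo_alloc_ids<'tcx>(tcx: TyCtxt<'tcx>, static_id: DefId, instance: Instance<'tcx>) {
    // Statics are deduplicated: asking twice for the same static yields the same id.
    let a: AllocId = tcx.reserve_and_set_static_alloc(static_id);
    let b: AllocId = tcx.reserve_and_set_static_alloc(static_id);
    assert_eq!(a, b);
    // Function allocations may or may not be deduplicated (see the comment above),
    // so no such equality is guaranteed here.
    let _f: AllocId = tcx.reserve_and_set_fn_alloc(instance);
}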
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 65d049193..1c9ce1cb1 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -103,7 +103,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
-pub trait Provenance: Copy + fmt::Debug {
+pub trait Provenance: Copy + fmt::Debug + 'static {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
/// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
/// different from what the Abstract Machine prescribes, so the interpreter must prevent any
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index fc659ce18..fbf6403ea 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -61,8 +61,10 @@ impl<'tcx> TyCtxt<'tcx> {
let cid = GlobalId { instance, promoted: ct.promoted };
self.const_eval_global_id(param_env, cid, span)
}
- Ok(None) => Err(ErrorHandled::TooGeneric),
- Err(err) => Err(ErrorHandled::Reported(err.into())),
+ // For errors during resolution, we deliberately do not point at the usage site of the constant,
+ // since for these errors the place where the constant is used shouldn't matter.
+ Ok(None) => Err(ErrorHandled::TooGeneric(DUMMY_SP)),
+ Err(err) => Err(ErrorHandled::Reported(err.into(), DUMMY_SP)),
}
}
@@ -117,8 +119,10 @@ impl<'tcx> TyCtxt<'tcx> {
}
})
}
- Ok(None) => Err(ErrorHandled::TooGeneric),
- Err(err) => Err(ErrorHandled::Reported(err.into())),
+ // For errors during resolution, we deliberately do not point at the usage site of the constant,
+ // since for these errors the place where the constant is used shouldn't matter.
+ Ok(None) => Err(ErrorHandled::TooGeneric(DUMMY_SP)),
+ Err(err) => Err(ErrorHandled::Reported(err.into(), DUMMY_SP)),
}
}
@@ -143,7 +147,8 @@ impl<'tcx> TyCtxt<'tcx> {
// improve caching of queries.
let inputs = self.erase_regions(param_env.and(cid));
if let Some(span) = span {
- self.at(span).eval_to_const_value_raw(inputs)
+ // The query doesn't know where it is being invoked, so we need to fix the span.
+ self.at(span).eval_to_const_value_raw(inputs).map_err(|e| e.with_span(span))
} else {
self.eval_to_const_value_raw(inputs)
}
@@ -162,7 +167,8 @@ impl<'tcx> TyCtxt<'tcx> {
let inputs = self.erase_regions(param_env.and(cid));
debug!(?inputs);
if let Some(span) = span {
- self.at(span).eval_to_valtree(inputs)
+ // The query doesn't know where it is being invoked, so we need to fix the span.
+ self.at(span).eval_to_valtree(inputs).map_err(|e| e.with_span(span))
} else {
self.eval_to_valtree(inputs)
}
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 5345a6588..0d548f886 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -9,102 +9,9 @@ use rustc_apfloat::{
use rustc_macros::HashStable;
use rustc_target::abi::{HasDataLayout, Size};
-use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt};
+use crate::ty::ScalarInt;
-use super::{
- AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
- ScalarSizeMismatch,
-};
-
-/// Represents the result of const evaluation via the `eval_to_allocation` query.
-#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
-pub struct ConstAlloc<'tcx> {
- /// The value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
- /// (so you can use `AllocMap::unwrap_memory`).
- pub alloc_id: AllocId,
- pub ty: Ty<'tcx>,
-}
-
-/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
-/// array length computations, enum discriminants and the pattern matching logic.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash)]
-#[derive(HashStable, Lift)]
-pub enum ConstValue<'tcx> {
- /// Used only for types with `layout::abi::Scalar` ABI.
- ///
- /// Not using the enum `Value` to encode that this must not be `Uninit`.
- Scalar(Scalar),
-
- /// Only used for ZSTs.
- ZeroSized,
-
- /// Used only for `&[u8]` and `&str`
- Slice { data: ConstAllocation<'tcx>, start: usize, end: usize },
-
- /// A value not represented/representable by `Scalar` or `Slice`
- ByRef {
- /// The backing memory of the value, may contain more memory than needed for just the value
- /// in order to share `ConstAllocation`s between values
- alloc: ConstAllocation<'tcx>,
- /// Offset into `alloc`
- offset: Size,
- },
-}
-
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ConstValue<'_>, 32);
-
-impl<'tcx> ConstValue<'tcx> {
- #[inline]
- pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
- match *self {
- ConstValue::ByRef { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
- ConstValue::Scalar(val) => Some(val),
- }
- }
-
- pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
- self.try_to_scalar()?.try_to_int().ok()
- }
-
- pub fn try_to_bits(&self, size: Size) -> Option<u128> {
- self.try_to_scalar_int()?.to_bits(size).ok()
- }
-
- pub fn try_to_bool(&self) -> Option<bool> {
- self.try_to_scalar_int()?.try_into().ok()
- }
-
- pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
- self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
- }
-
- pub fn try_to_bits_for_ty(
- &self,
- tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- ty: Ty<'tcx>,
- ) -> Option<u128> {
- let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
- self.try_to_bits(size)
- }
-
- pub fn from_bool(b: bool) -> Self {
- ConstValue::Scalar(Scalar::from_bool(b))
- }
-
- pub fn from_u64(i: u64) -> Self {
- ConstValue::Scalar(Scalar::from_u64(i))
- }
-
- pub fn from_u128(i: u128) -> Self {
- ConstValue::Scalar(Scalar::from_u128(i))
- }
-
- pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
- ConstValue::Scalar(Scalar::from_target_usize(i, cx))
- }
-}
+use super::{AllocId, InterpResult, Pointer, PointerArithmetic, Provenance, ScalarSizeMismatch};
/// A `Scalar` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
@@ -267,6 +174,16 @@ impl<Prov> Scalar<Prov> {
}
#[inline]
+ pub fn from_i8(i: i8) -> Self {
+ Self::from_int(i, Size::from_bits(8))
+ }
+
+ #[inline]
+ pub fn from_i16(i: i16) -> Self {
+ Self::from_int(i, Size::from_bits(16))
+ }
+
+ #[inline]
pub fn from_i32(i: i32) -> Self {
Self::from_int(i, Size::from_bits(32))
}
@@ -494,29 +411,18 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
}
#[inline]
- pub fn to_f32(self) -> InterpResult<'tcx, Single> {
- // Going through `u32` to check size and truncation.
- Ok(Single::from_bits(self.to_u32()?.into()))
+ pub fn to_float<F: Float>(self) -> InterpResult<'tcx, F> {
+ // Going through `to_uint` to check size and truncation.
+ Ok(F::from_bits(self.to_uint(Size::from_bits(F::BITS))?))
}
#[inline]
- pub fn to_f64(self) -> InterpResult<'tcx, Double> {
- // Going through `u64` to check size and truncation.
- Ok(Double::from_bits(self.to_u64()?.into()))
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+ self.to_float()
}
-}
-/// Gets the bytes of a constant slice value.
-pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
- if let ConstValue::Slice { data, start, end } = val {
- let len = end - start;
- data.inner()
- .get_bytes_strip_provenance(
- cx,
- AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
- )
- .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
- } else {
- bug!("expected const slice, but found another const value");
+ #[inline]
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+ self.to_float()
}
}
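An illustrative sketch, not from the patch, of the new `Scalar::from_i8`/`from_i16` constructors and the generic `to_float` that now backs `to_f32`/`to_f64`; it uses the default `Scalar<AllocId>` alias and assumed import paths.

use rustc_apfloat::ieee::Single;
use rustc_apfloat::Float;
use rustc_middle::mir::interpret::{InterpResult, Scalar};

fn demo_scalars<'tcx>() -> InterpResult<'tcx, ()> {
    // The new small-integer constructors mirror the existing `from_i32` and friends.
    let _tiny: Scalar = Scalar::from_i16(-3);
    // `to_float` checks size and truncation via `to_uint`, just as the old `to_f32`
    // body did through `to_u32`, and then reinterprets the bits as the requested float.
    let four_bytes: Scalar = Scalar::from_i32(0);
    let zero: Single = four_bytes.to_float()?;
    assert!(zero.is_zero());
    Ok(())
}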
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 9ef3a1b30..0bb1c66da 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -2,29 +2,29 @@
//!
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
-use crate::mir::interpret::{
- AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
-};
+use crate::mir::interpret::{AllocRange, ConstAllocation, ErrorHandled, Scalar};
use crate::mir::visit::MirVisitable;
use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable};
+use crate::ty::print::{pretty_print_const, with_no_trimmed_paths};
use crate::ty::print::{FmtPrinter, Printer};
use crate::ty::visit::TypeVisitableExt;
use crate::ty::{self, List, Ty, TyCtxt};
-use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
-use crate::ty::{GenericArg, GenericArgs, GenericArgsRef};
+use crate::ty::{AdtDef, InstanceDef, UserTypeAnnotationIndex};
+use crate::ty::{GenericArg, GenericArgsRef};
use rustc_data_structures::captures::Captures;
use rustc_errors::{DiagnosticArgValue, DiagnosticMessage, ErrorGuaranteed, IntoDiagnosticArg};
use rustc_hir::def::{CtorKind, Namespace};
-use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::def_id::{DefId, CRATE_DEF_ID};
use rustc_hir::{self, GeneratorKind, ImplicitSelfKind};
use rustc_hir::{self as hir, HirId};
use rustc_session::Session;
-use rustc_target::abi::{FieldIdx, Size, VariantIdx};
+use rustc_target::abi::{FieldIdx, VariantIdx};
use polonius_engine::Atom;
pub use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::graph::dominators::Dominators;
use rustc_index::{Idx, IndexSlice, IndexVec};
@@ -35,7 +35,9 @@ use rustc_span::{Span, DUMMY_SP};
use either::Either;
use std::borrow::Cow;
-use std::fmt::{self, Debug, Display, Formatter, Write};
+use std::cell::RefCell;
+use std::collections::hash_map::Entry;
+use std::fmt::{self, Debug, Formatter};
use std::ops::{Index, IndexMut};
use std::{iter, mem};
@@ -43,6 +45,7 @@ pub use self::query::*;
pub use basic_blocks::BasicBlocks;
mod basic_blocks;
+mod consts;
pub mod coverage;
mod generic_graph;
pub mod generic_graphviz;
@@ -53,11 +56,10 @@ pub mod patch;
pub mod pretty;
mod query;
pub mod spanview;
+mod statement;
mod syntax;
-pub use syntax::*;
pub mod tcx;
-pub mod terminator;
-pub use terminator::*;
+mod terminator;
pub mod traversal;
mod type_foldable;
@@ -68,6 +70,11 @@ pub use self::graphviz::write_mir_graphviz;
pub use self::pretty::{
create_dump_file, display_allocation, dump_enabled, dump_mir, write_mir_pretty, PassWhere,
};
+pub use consts::*;
+use pretty::pretty_print_const_value;
+pub use statement::*;
+pub use syntax::*;
+pub use terminator::*;
/// Types for locals
pub type LocalDecls<'tcx> = IndexSlice<Local, LocalDecl<'tcx>>;
@@ -97,6 +104,36 @@ impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
}
}
+thread_local! {
+ static PASS_NAMES: RefCell<FxHashMap<&'static str, &'static str>> = {
+ RefCell::new(FxHashMap::default())
+ };
+}
+
+/// Converts a MIR pass name into a snake case form to match the profiling naming style.
+fn to_profiler_name(type_name: &'static str) -> &'static str {
+ PASS_NAMES.with(|names| match names.borrow_mut().entry(type_name) {
+ Entry::Occupied(e) => *e.get(),
+ Entry::Vacant(e) => {
+ let snake_case: String = type_name
+ .chars()
+ .flat_map(|c| {
+ if c.is_ascii_uppercase() {
+ vec!['_', c.to_ascii_lowercase()]
+ } else if c == '-' {
+ vec!['_']
+ } else {
+ vec![c]
+ }
+ })
+ .collect();
+ let result = &*String::leak(format!("mir_pass{}", snake_case));
+ e.insert(result);
+ result
+ }
+ })
+}
+
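A quick illustration, not part of the patch, of the snake_case conversion performed by `to_profiler_name` above; the pass name shown is only an example.

// `MirPass::name()` usually returns the type name's tail, e.g. "SimplifyCfg".
// Each uppercase letter becomes `_` plus its lowercase form, and `-` becomes `_`:
//     to_profiler_name("SimplifyCfg") == "mir_pass_simplify_cfg"
// The leaked string is cached in the thread-local `PASS_NAMES` map, so repeated
// lookups for the same pass are a single hash-map hit.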
/// A streamlined trait that you can implement to create a pass; the
/// pass will be named after the type, and it will consist of a main
/// loop that goes over each available MIR and applies `run_pass`.
@@ -106,6 +143,10 @@ pub trait MirPass<'tcx> {
if let Some((_, tail)) = name.rsplit_once(':') { tail } else { name }
}
+ fn profiler_name(&self) -> &'static str {
+ to_profiler_name(self.name())
+ }
+
/// Returns `true` if this pass is enabled with the current combination of compiler flags.
fn is_enabled(&self, _sess: &Session) -> bool {
true
@@ -277,7 +318,7 @@ pub struct Body<'tcx> {
/// Constants that are required to evaluate successfully for this MIR to be well-formed.
/// We hold in this field all the constants we are not able to evaluate yet.
- pub required_consts: Vec<Constant<'tcx>>,
+ pub required_consts: Vec<ConstOperand<'tcx>>,
/// Does this body use generic parameters. This is used for the `ConstEvaluatable` check.
///
@@ -527,6 +568,34 @@ impl<'tcx> Body<'tcx> {
pub fn is_custom_mir(&self) -> bool {
self.injection_phase.is_some()
}
+
+ /// *Must* be called once the full substitution for this body is known, to ensure that the body
+ /// is indeed fit for code generation or consumption more generally.
+ ///
+ /// Sadly there's no nice way to represent an "arbitrary normalizer", so we take one for
+ /// constants specifically. (`Option<GenericArgsRef>` could be used for that, but the fact
+ /// that `Instance::args_for_mir_body` is private, and that `Instance` instead exposes
+ /// normalization functions, makes it seem like exposing the generic args is not the
+ /// intended strategy.)
+ ///
+ /// Also sadly, CTFE doesn't even know whether it runs on MIR that is still polymorphic or
+ /// already monomorphic, so we cannot just immediately ICE on TooGeneric.
+ ///
+ /// Returns `Ok(())` if everything went fine, and `Err` if a problem occurred and was reported.
+ pub fn post_mono_checks(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ normalize_const: impl Fn(Const<'tcx>) -> Result<Const<'tcx>, ErrorHandled>,
+ ) -> Result<(), ErrorHandled> {
+ // For now, the only thing we have to check is that all the constants used in the body
+ // successfully evaluate.
+ for &const_ in &self.required_consts {
+ let c = normalize_const(const_.const_)?;
+ c.eval(tcx, param_env, Some(const_.span))?;
+ }
+
+ Ok(())
+ }
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
@@ -706,7 +775,7 @@ pub enum BindingForm<'tcx> {
RefForGuard,
}
-TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx> }
+TrivialTypeTraversalImpls! { BindingForm<'tcx> }
mod binding_form_impl {
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -1027,20 +1096,7 @@ impl<'tcx> LocalDecl<'tcx> {
pub enum VarDebugInfoContents<'tcx> {
/// This `Place` only contains projection which satisfy `can_use_in_debuginfo`.
Place(Place<'tcx>),
- Const(Constant<'tcx>),
- /// The user variable's data is split across several fragments,
- /// each described by a `VarDebugInfoFragment`.
- /// See DWARF 5's "2.6.1.2 Composite Location Descriptions"
- /// and LLVM's `DW_OP_LLVM_fragment` for more details on
- /// the underlying debuginfo feature this relies on.
- Composite {
- /// Type of the original user variable.
- /// This cannot contain a union or an enum.
- ty: Ty<'tcx>,
- /// All the parts of the original user variable, which ended
- /// up in disjoint places, due to optimizations.
- fragments: Vec<VarDebugInfoFragment<'tcx>>,
- },
+ Const(ConstOperand<'tcx>),
}
impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
@@ -1048,19 +1104,16 @@ impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
match self {
VarDebugInfoContents::Const(c) => write!(fmt, "{c}"),
VarDebugInfoContents::Place(p) => write!(fmt, "{p:?}"),
- VarDebugInfoContents::Composite { ty, fragments } => {
- write!(fmt, "{ty:?}{{ ")?;
- for f in fragments.iter() {
- write!(fmt, "{f:?}, ")?;
- }
- write!(fmt, "}}")
- }
}
}
}
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct VarDebugInfoFragment<'tcx> {
+ /// Type of the original user variable.
+ /// This cannot contain a union or an enum.
+ pub ty: Ty<'tcx>,
+
/// Where in the composite user variable this fragment is,
/// represented as a "projection" into the composite variable.
/// At lower levels, this corresponds to a byte/bit range.
@@ -1071,29 +1124,10 @@ pub struct VarDebugInfoFragment<'tcx> {
// to match on the discriminant, or by using custom type debuginfo
// with non-overlapping variants for the composite variable.
pub projection: Vec<PlaceElem<'tcx>>,
-
- /// Where the data for this fragment can be found.
- /// This `Place` only contains projection which satisfy `can_use_in_debuginfo`.
- pub contents: Place<'tcx>,
-}
-
-impl Debug for VarDebugInfoFragment<'_> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- for elem in self.projection.iter() {
- match elem {
- ProjectionElem::Field(field, _) => {
- write!(fmt, ".{:?}", field.index())?;
- }
- _ => bug!("unsupported fragment projection `{:?}`", elem),
- }
- }
-
- write!(fmt, " => {:?}", self.contents)
- }
}
/// Debug information pertaining to a user variable.
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct VarDebugInfo<'tcx> {
pub name: Symbol,
@@ -1102,6 +1136,13 @@ pub struct VarDebugInfo<'tcx> {
/// (see `LocalDecl`'s `source_info` field for more details).
pub source_info: SourceInfo,
+ /// The user variable's data is split across several fragments,
+ /// each described by a `VarDebugInfoFragment`.
+ /// See DWARF 5's "2.6.1.2 Composite Location Descriptions"
+ /// and LLVM's `DW_OP_LLVM_fragment` for more details on
+ /// the underlying debuginfo feature this relies on.
+ pub composite: Option<Box<VarDebugInfoFragment<'tcx>>>,
+
/// Where the data for this user variable is to be found.
pub value: VarDebugInfoContents<'tcx>,
@@ -1267,542 +1308,6 @@ impl<'tcx> BasicBlockData<'tcx> {
}
}
-impl<O> AssertKind<O> {
- /// Returns true if this an overflow checking assertion controlled by -C overflow-checks.
- pub fn is_optional_overflow_check(&self) -> bool {
- use AssertKind::*;
- use BinOp::*;
- matches!(self, OverflowNeg(..) | Overflow(Add | Sub | Mul | Shl | Shr, ..))
- }
-
- /// Getting a description does not require `O` to be printable, and does not
- /// require allocation.
- /// The caller is expected to handle `BoundsCheck` and `MisalignedPointerDereference` separately.
- pub fn description(&self) -> &'static str {
- use AssertKind::*;
- match self {
- Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
- Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
- Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
- Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
- Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
- OverflowNeg(_) => "attempt to negate with overflow",
- Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
- Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
- Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
- DivisionByZero(_) => "attempt to divide by zero",
- RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
- ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
- ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
- ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
- ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
- BoundsCheck { .. } | MisalignedPointerDereference { .. } => {
- bug!("Unexpected AssertKind")
- }
- }
- }
-
- /// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
- pub fn fmt_assert_args<W: Write>(&self, f: &mut W) -> fmt::Result
- where
- O: Debug,
- {
- use AssertKind::*;
- match self {
- BoundsCheck { ref len, ref index } => write!(
- f,
- "\"index out of bounds: the length is {{}} but the index is {{}}\", {len:?}, {index:?}"
- ),
-
- OverflowNeg(op) => {
- write!(f, "\"attempt to negate `{{}}`, which would overflow\", {op:?}")
- }
- DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {op:?}"),
- RemainderByZero(op) => write!(
- f,
- "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {op:?}"
- ),
- Overflow(BinOp::Add, l, r) => write!(
- f,
- "\"attempt to compute `{{}} + {{}}`, which would overflow\", {l:?}, {r:?}"
- ),
- Overflow(BinOp::Sub, l, r) => write!(
- f,
- "\"attempt to compute `{{}} - {{}}`, which would overflow\", {l:?}, {r:?}"
- ),
- Overflow(BinOp::Mul, l, r) => write!(
- f,
- "\"attempt to compute `{{}} * {{}}`, which would overflow\", {l:?}, {r:?}"
- ),
- Overflow(BinOp::Div, l, r) => write!(
- f,
- "\"attempt to compute `{{}} / {{}}`, which would overflow\", {l:?}, {r:?}"
- ),
- Overflow(BinOp::Rem, l, r) => write!(
- f,
- "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {l:?}, {r:?}"
- ),
- Overflow(BinOp::Shr, _, r) => {
- write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {r:?}")
- }
- Overflow(BinOp::Shl, _, r) => {
- write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {r:?}")
- }
- MisalignedPointerDereference { required, found } => {
- write!(
- f,
- "\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {required:?}, {found:?}"
- )
- }
- _ => write!(f, "\"{}\"", self.description()),
- }
- }
-
- pub fn diagnostic_message(&self) -> DiagnosticMessage {
- use crate::fluent_generated::*;
- use AssertKind::*;
-
- match self {
- BoundsCheck { .. } => middle_bounds_check,
- Overflow(BinOp::Shl, _, _) => middle_assert_shl_overflow,
- Overflow(BinOp::Shr, _, _) => middle_assert_shr_overflow,
- Overflow(_, _, _) => middle_assert_op_overflow,
- OverflowNeg(_) => middle_assert_overflow_neg,
- DivisionByZero(_) => middle_assert_divide_by_zero,
- RemainderByZero(_) => middle_assert_remainder_by_zero,
- ResumedAfterReturn(GeneratorKind::Async(_)) => middle_assert_async_resume_after_return,
- ResumedAfterReturn(GeneratorKind::Gen) => middle_assert_generator_resume_after_return,
- ResumedAfterPanic(GeneratorKind::Async(_)) => middle_assert_async_resume_after_panic,
- ResumedAfterPanic(GeneratorKind::Gen) => middle_assert_generator_resume_after_panic,
-
- MisalignedPointerDereference { .. } => middle_assert_misaligned_ptr_deref,
- }
- }
-
- pub fn add_args(self, adder: &mut dyn FnMut(Cow<'static, str>, DiagnosticArgValue<'static>))
- where
- O: fmt::Debug,
- {
- use AssertKind::*;
-
- macro_rules! add {
- ($name: expr, $value: expr) => {
- adder($name.into(), $value.into_diagnostic_arg());
- };
- }
-
- match self {
- BoundsCheck { len, index } => {
- add!("len", format!("{len:?}"));
- add!("index", format!("{index:?}"));
- }
- Overflow(BinOp::Shl | BinOp::Shr, _, val)
- | DivisionByZero(val)
- | RemainderByZero(val)
- | OverflowNeg(val) => {
- add!("val", format!("{val:#?}"));
- }
- Overflow(binop, left, right) => {
- add!("op", binop.to_hir_binop().as_str());
- add!("left", format!("{left:#?}"));
- add!("right", format!("{right:#?}"));
- }
- ResumedAfterReturn(_) | ResumedAfterPanic(_) => {}
- MisalignedPointerDereference { required, found } => {
- add!("required", format!("{required:#?}"));
- add!("found", format!("{found:#?}"));
- }
- }
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Statements
-
-/// A statement in a basic block, including information about its source code.
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
-pub struct Statement<'tcx> {
- pub source_info: SourceInfo,
- pub kind: StatementKind<'tcx>,
-}
-
-impl Statement<'_> {
- /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
- /// invalidating statement indices in `Location`s.
- pub fn make_nop(&mut self) {
- self.kind = StatementKind::Nop
- }
-
- /// Changes a statement to a nop and returns the original statement.
- #[must_use = "If you don't need the statement, use `make_nop` instead"]
- pub fn replace_nop(&mut self) -> Self {
- Statement {
- source_info: self.source_info,
- kind: mem::replace(&mut self.kind, StatementKind::Nop),
- }
- }
-}
-
-impl Debug for Statement<'_> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- use self::StatementKind::*;
- match self.kind {
- Assign(box (ref place, ref rv)) => write!(fmt, "{place:?} = {rv:?}"),
- FakeRead(box (ref cause, ref place)) => {
- write!(fmt, "FakeRead({cause:?}, {place:?})")
- }
- Retag(ref kind, ref place) => write!(
- fmt,
- "Retag({}{:?})",
- match kind {
- RetagKind::FnEntry => "[fn entry] ",
- RetagKind::TwoPhase => "[2phase] ",
- RetagKind::Raw => "[raw] ",
- RetagKind::Default => "",
- },
- place,
- ),
- StorageLive(ref place) => write!(fmt, "StorageLive({place:?})"),
- StorageDead(ref place) => write!(fmt, "StorageDead({place:?})"),
- SetDiscriminant { ref place, variant_index } => {
- write!(fmt, "discriminant({place:?}) = {variant_index:?}")
- }
- Deinit(ref place) => write!(fmt, "Deinit({place:?})"),
- PlaceMention(ref place) => {
- write!(fmt, "PlaceMention({place:?})")
- }
- AscribeUserType(box (ref place, ref c_ty), ref variance) => {
- write!(fmt, "AscribeUserType({place:?}, {variance:?}, {c_ty:?})")
- }
- Coverage(box self::Coverage { ref kind, code_region: Some(ref rgn) }) => {
- write!(fmt, "Coverage::{kind:?} for {rgn:?}")
- }
- Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
- Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"),
- ConstEvalCounter => write!(fmt, "ConstEvalCounter"),
- Nop => write!(fmt, "nop"),
- }
- }
-}
-
-impl<'tcx> StatementKind<'tcx> {
- pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
- match self {
- StatementKind::Assign(x) => Some(x),
- _ => None,
- }
- }
-
- pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
- match self {
- StatementKind::Assign(x) => Some(x),
- _ => None,
- }
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Places
-
-impl<V, T> ProjectionElem<V, T> {
- /// Returns `true` if the target of this projection may refer to a different region of memory
- /// than the base.
- fn is_indirect(&self) -> bool {
- match self {
- Self::Deref => true,
-
- Self::Field(_, _)
- | Self::Index(_)
- | Self::OpaqueCast(_)
- | Self::ConstantIndex { .. }
- | Self::Subslice { .. }
- | Self::Downcast(_, _) => false,
- }
- }
-
- /// Returns `true` if the target of this projection always refers to the same memory region
- /// whatever the state of the program.
- pub fn is_stable_offset(&self) -> bool {
- match self {
- Self::Deref | Self::Index(_) => false,
- Self::Field(_, _)
- | Self::OpaqueCast(_)
- | Self::ConstantIndex { .. }
- | Self::Subslice { .. }
- | Self::Downcast(_, _) => true,
- }
- }
-
- /// Returns `true` if this is a `Downcast` projection with the given `VariantIdx`.
- pub fn is_downcast_to(&self, v: VariantIdx) -> bool {
- matches!(*self, Self::Downcast(_, x) if x == v)
- }
-
- /// Returns `true` if this is a `Field` projection with the given index.
- pub fn is_field_to(&self, f: FieldIdx) -> bool {
- matches!(*self, Self::Field(x, _) if x == f)
- }
-
- /// Returns `true` if this is accepted inside `VarDebugInfoContents::Place`.
- pub fn can_use_in_debuginfo(&self) -> bool {
- match self {
- Self::ConstantIndex { from_end: false, .. }
- | Self::Deref
- | Self::Downcast(_, _)
- | Self::Field(_, _) => true,
- Self::ConstantIndex { from_end: true, .. }
- | Self::Index(_)
- | Self::OpaqueCast(_)
- | Self::Subslice { .. } => false,
- }
- }
-}
-
-/// Alias for projections as they appear in `UserTypeProjection`, where we
-/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
-pub type ProjectionKind = ProjectionElem<(), ()>;
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub struct PlaceRef<'tcx> {
- pub local: Local,
- pub projection: &'tcx [PlaceElem<'tcx>],
-}
-
-// Once we stop implementing `Ord` for `DefId`,
-// this impl will be unnecessary. Until then, we'll
-// leave this impl in place to prevent re-adding a
-// dependency on the `Ord` impl for `DefId`
-impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
-
-impl<'tcx> Place<'tcx> {
- // FIXME change this to a const fn by also making List::empty a const fn.
- pub fn return_place() -> Place<'tcx> {
- Place { local: RETURN_PLACE, projection: List::empty() }
- }
-
- /// Returns `true` if this `Place` contains a `Deref` projection.
- ///
- /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
- /// same region of memory as its base.
- pub fn is_indirect(&self) -> bool {
- self.projection.iter().any(|elem| elem.is_indirect())
- }
-
- /// Returns `true` if this `Place`'s first projection is `Deref`.
- ///
- /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
- /// `Deref` projections can only occur as the first projection. In that case this method
- /// is equivalent to `is_indirect`, but faster.
- pub fn is_indirect_first_projection(&self) -> bool {
- self.as_ref().is_indirect_first_projection()
- }
-
- /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
- /// a single deref of a local.
- #[inline(always)]
- pub fn local_or_deref_local(&self) -> Option<Local> {
- self.as_ref().local_or_deref_local()
- }
-
- /// If this place represents a local variable like `_X` with no
- /// projections, return `Some(_X)`.
- #[inline(always)]
- pub fn as_local(&self) -> Option<Local> {
- self.as_ref().as_local()
- }
-
- #[inline]
- pub fn as_ref(&self) -> PlaceRef<'tcx> {
- PlaceRef { local: self.local, projection: &self.projection }
- }
-
- /// Iterate over the projections in evaluation order, i.e., the first element is the base with
- /// its projection and then subsequently more projections are added.
- /// As a concrete example, given the place a.b.c, this would yield:
- /// - (a, .b)
- /// - (a.b, .c)
- ///
- /// Given a place without projections, the iterator is empty.
- #[inline]
- pub fn iter_projections(
- self,
- ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
- self.as_ref().iter_projections()
- }
-
- /// Generates a new place by appending `more_projections` to the existing ones
- /// and interning the result.
- pub fn project_deeper(self, more_projections: &[PlaceElem<'tcx>], tcx: TyCtxt<'tcx>) -> Self {
- if more_projections.is_empty() {
- return self;
- }
-
- self.as_ref().project_deeper(more_projections, tcx)
- }
-}
-
-impl From<Local> for Place<'_> {
- #[inline]
- fn from(local: Local) -> Self {
- Place { local, projection: List::empty() }
- }
-}
-
-impl<'tcx> PlaceRef<'tcx> {
- /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
- /// a single deref of a local.
- pub fn local_or_deref_local(&self) -> Option<Local> {
- match *self {
- PlaceRef { local, projection: [] }
- | PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
- _ => None,
- }
- }
-
- /// Returns `true` if this `Place` contains a `Deref` projection.
- ///
- /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
- /// same region of memory as its base.
- pub fn is_indirect(&self) -> bool {
- self.projection.iter().any(|elem| elem.is_indirect())
- }
-
- /// Returns `true` if this `Place`'s first projection is `Deref`.
- ///
- /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
- /// `Deref` projections can only occur as the first projection. In that case this method
- /// is equivalent to `is_indirect`, but faster.
- pub fn is_indirect_first_projection(&self) -> bool {
- // To make sure this is not accidentally used in wrong mir phase
- debug_assert!(
- self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
- );
- self.projection.first() == Some(&PlaceElem::Deref)
- }
-
- /// If this place represents a local variable like `_X` with no
- /// projections, return `Some(_X)`.
- #[inline]
- pub fn as_local(&self) -> Option<Local> {
- match *self {
- PlaceRef { local, projection: [] } => Some(local),
- _ => None,
- }
- }
-
- #[inline]
- pub fn last_projection(&self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
- if let &[ref proj_base @ .., elem] = self.projection {
- Some((PlaceRef { local: self.local, projection: proj_base }, elem))
- } else {
- None
- }
- }
-
- /// Iterate over the projections in evaluation order, i.e., the first element is the base with
- /// its projection and then subsequently more projections are added.
- /// As a concrete example, given the place a.b.c, this would yield:
- /// - (a, .b)
- /// - (a.b, .c)
- ///
- /// Given a place without projections, the iterator is empty.
- #[inline]
- pub fn iter_projections(
- self,
- ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
- self.projection.iter().enumerate().map(move |(i, proj)| {
- let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
- (base, *proj)
- })
- }
-
- /// Generates a new place by appending `more_projections` to the existing ones
- /// and interning the result.
- pub fn project_deeper(
- self,
- more_projections: &[PlaceElem<'tcx>],
- tcx: TyCtxt<'tcx>,
- ) -> Place<'tcx> {
- let mut v: Vec<PlaceElem<'tcx>>;
-
- let new_projections = if self.projection.is_empty() {
- more_projections
- } else {
- v = Vec::with_capacity(self.projection.len() + more_projections.len());
- v.extend(self.projection);
- v.extend(more_projections);
- &v
- };
-
- Place { local: self.local, projection: tcx.mk_place_elems(new_projections) }
- }
-}
-
-impl Debug for Place<'_> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- for elem in self.projection.iter().rev() {
- match elem {
- ProjectionElem::OpaqueCast(_)
- | ProjectionElem::Downcast(_, _)
- | ProjectionElem::Field(_, _) => {
- write!(fmt, "(").unwrap();
- }
- ProjectionElem::Deref => {
- write!(fmt, "(*").unwrap();
- }
- ProjectionElem::Index(_)
- | ProjectionElem::ConstantIndex { .. }
- | ProjectionElem::Subslice { .. } => {}
- }
- }
-
- write!(fmt, "{:?}", self.local)?;
-
- for elem in self.projection.iter() {
- match elem {
- ProjectionElem::OpaqueCast(ty) => {
- write!(fmt, " as {ty})")?;
- }
- ProjectionElem::Downcast(Some(name), _index) => {
- write!(fmt, " as {name})")?;
- }
- ProjectionElem::Downcast(None, index) => {
- write!(fmt, " as variant#{index:?})")?;
- }
- ProjectionElem::Deref => {
- write!(fmt, ")")?;
- }
- ProjectionElem::Field(field, ty) => {
- write!(fmt, ".{:?}: {:?})", field.index(), ty)?;
- }
- ProjectionElem::Index(ref index) => {
- write!(fmt, "[{index:?}]")?;
- }
- ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
- write!(fmt, "[{offset:?} of {min_length:?}]")?;
- }
- ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
- write!(fmt, "[-{offset:?} of {min_length:?}]")?;
- }
- ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => {
- write!(fmt, "[{from:?}:]")?;
- }
- ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => {
- write!(fmt, "[:-{to:?}]")?;
- }
- ProjectionElem::Subslice { from, to, from_end: true } => {
- write!(fmt, "[{from:?}:-{to:?}]")?;
- }
- ProjectionElem::Subslice { from, to, from_end: false } => {
- write!(fmt, "[{from:?}..{to:?}]")?;
- }
- }
- }
-
- Ok(())
- }
-}
-
///////////////////////////////////////////////////////////////////////////
// Scopes
@@ -1881,719 +1386,12 @@ pub struct SourceScopeLocalData {
pub safety: Safety,
}
-///////////////////////////////////////////////////////////////////////////
-// Operands
-
-impl<'tcx> Debug for Operand<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- use self::Operand::*;
- match *self {
- Constant(ref a) => write!(fmt, "{a:?}"),
- Copy(ref place) => write!(fmt, "{place:?}"),
- Move(ref place) => write!(fmt, "move {place:?}"),
- }
- }
-}
-
-impl<'tcx> Operand<'tcx> {
- /// Convenience helper to make a constant that refers to the fn
- /// with given `DefId` and args. Since this is used to synthesize
- /// MIR, assumes `user_ty` is None.
- pub fn function_handle(
- tcx: TyCtxt<'tcx>,
- def_id: DefId,
- args: impl IntoIterator<Item = GenericArg<'tcx>>,
- span: Span,
- ) -> Self {
- let ty = Ty::new_fn_def(tcx, def_id, args);
- Operand::Constant(Box::new(Constant {
- span,
- user_ty: None,
- literal: ConstantKind::Val(ConstValue::ZeroSized, ty),
- }))
- }
-
- pub fn is_move(&self) -> bool {
- matches!(self, Operand::Move(..))
- }
-
- /// Convenience helper to make a literal-like constant from a given scalar value.
- /// Since this is used to synthesize MIR, assumes `user_ty` is None.
- pub fn const_from_scalar(
- tcx: TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- val: Scalar,
- span: Span,
- ) -> Operand<'tcx> {
- debug_assert!({
- let param_env_and_ty = ty::ParamEnv::empty().and(ty);
- let type_size = tcx
- .layout_of(param_env_and_ty)
- .unwrap_or_else(|e| panic!("could not compute layout for {ty:?}: {e:?}"))
- .size;
- let scalar_size = match val {
- Scalar::Int(int) => int.size(),
- _ => panic!("Invalid scalar type {val:?}"),
- };
- scalar_size == type_size
- });
- Operand::Constant(Box::new(Constant {
- span,
- user_ty: None,
- literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
- }))
- }
-
- pub fn to_copy(&self) -> Self {
- match *self {
- Operand::Copy(_) | Operand::Constant(_) => self.clone(),
- Operand::Move(place) => Operand::Copy(place),
- }
- }
-
- /// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
- /// constant.
- pub fn place(&self) -> Option<Place<'tcx>> {
- match self {
- Operand::Copy(place) | Operand::Move(place) => Some(*place),
- Operand::Constant(_) => None,
- }
- }
-
- /// Returns the `Constant` that is the target of this `Operand`, or `None` if this `Operand` is a
- /// place.
- pub fn constant(&self) -> Option<&Constant<'tcx>> {
- match self {
- Operand::Constant(x) => Some(&**x),
- Operand::Copy(_) | Operand::Move(_) => None,
- }
- }
-
- /// Gets the `ty::FnDef` from an operand if it's a constant function item.
- ///
- /// While this is unlikely in general, it's the normal case of what you'll
- /// find as the `func` in a [`TerminatorKind::Call`].
- pub fn const_fn_def(&self) -> Option<(DefId, GenericArgsRef<'tcx>)> {
- let const_ty = self.constant()?.literal.ty();
- if let ty::FnDef(def_id, args) = *const_ty.kind() { Some((def_id, args)) } else { None }
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-/// Rvalues
-
-impl<'tcx> Rvalue<'tcx> {
- /// Returns true if rvalue can be safely removed when the result is unused.
- #[inline]
- pub fn is_safe_to_remove(&self) -> bool {
- match self {
- // Pointer to int casts may be side-effects due to exposing the provenance.
- // While the model is undecided, we should be conservative. See
- // <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
- Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
-
- Rvalue::Use(_)
- | Rvalue::CopyForDeref(_)
- | Rvalue::Repeat(_, _)
- | Rvalue::Ref(_, _, _)
- | Rvalue::ThreadLocalRef(_)
- | Rvalue::AddressOf(_, _)
- | Rvalue::Len(_)
- | Rvalue::Cast(
- CastKind::IntToInt
- | CastKind::FloatToInt
- | CastKind::FloatToFloat
- | CastKind::IntToFloat
- | CastKind::FnPtrToPtr
- | CastKind::PtrToPtr
- | CastKind::PointerCoercion(_)
- | CastKind::PointerFromExposedAddress
- | CastKind::DynStar
- | CastKind::Transmute,
- _,
- _,
- )
- | Rvalue::BinaryOp(_, _)
- | Rvalue::CheckedBinaryOp(_, _)
- | Rvalue::NullaryOp(_, _)
- | Rvalue::UnaryOp(_, _)
- | Rvalue::Discriminant(_)
- | Rvalue::Aggregate(_, _)
- | Rvalue::ShallowInitBox(_, _) => true,
- }
- }
-}
-
-impl BorrowKind {
- pub fn mutability(&self) -> Mutability {
- match *self {
- BorrowKind::Shared | BorrowKind::Shallow => Mutability::Not,
- BorrowKind::Mut { .. } => Mutability::Mut,
- }
- }
-
- pub fn allows_two_phase_borrow(&self) -> bool {
- match *self {
- BorrowKind::Shared
- | BorrowKind::Shallow
- | BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::ClosureCapture } => {
- false
- }
- BorrowKind::Mut { kind: MutBorrowKind::TwoPhaseBorrow } => true,
- }
- }
-}
-
-impl<'tcx> Debug for Rvalue<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- use self::Rvalue::*;
-
- match *self {
- Use(ref place) => write!(fmt, "{place:?}"),
- Repeat(ref a, b) => {
- write!(fmt, "[{a:?}; ")?;
- pretty_print_const(b, fmt, false)?;
- write!(fmt, "]")
- }
- Len(ref a) => write!(fmt, "Len({a:?})"),
- Cast(ref kind, ref place, ref ty) => {
- write!(fmt, "{place:?} as {ty:?} ({kind:?})")
- }
- BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{op:?}({a:?}, {b:?})"),
- CheckedBinaryOp(ref op, box (ref a, ref b)) => {
- write!(fmt, "Checked{op:?}({a:?}, {b:?})")
- }
- UnaryOp(ref op, ref a) => write!(fmt, "{op:?}({a:?})"),
- Discriminant(ref place) => write!(fmt, "discriminant({place:?})"),
- NullaryOp(ref op, ref t) => match op {
- NullOp::SizeOf => write!(fmt, "SizeOf({t:?})"),
- NullOp::AlignOf => write!(fmt, "AlignOf({t:?})"),
- NullOp::OffsetOf(fields) => write!(fmt, "OffsetOf({t:?}, {fields:?})"),
- },
- ThreadLocalRef(did) => ty::tls::with(|tcx| {
- let muta = tcx.static_mutability(did).unwrap().prefix_str();
- write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
- }),
- Ref(region, borrow_kind, ref place) => {
- let kind_str = match borrow_kind {
- BorrowKind::Shared => "",
- BorrowKind::Shallow => "shallow ",
- BorrowKind::Mut { .. } => "mut ",
- };
-
- // When printing regions, add trailing space if necessary.
- let print_region = ty::tls::with(|tcx| {
- tcx.sess.verbose() || tcx.sess.opts.unstable_opts.identify_regions
- });
- let region = if print_region {
- let mut region = region.to_string();
- if !region.is_empty() {
- region.push(' ');
- }
- region
- } else {
- // Do not even print 'static
- String::new()
- };
- write!(fmt, "&{region}{kind_str}{place:?}")
- }
-
- CopyForDeref(ref place) => write!(fmt, "deref_copy {place:#?}"),
-
- AddressOf(mutability, ref place) => {
- let kind_str = match mutability {
- Mutability::Mut => "mut",
- Mutability::Not => "const",
- };
-
- write!(fmt, "&raw {kind_str} {place:?}")
- }
-
- Aggregate(ref kind, ref places) => {
- let fmt_tuple = |fmt: &mut Formatter<'_>, name: &str| {
- let mut tuple_fmt = fmt.debug_tuple(name);
- for place in places {
- tuple_fmt.field(place);
- }
- tuple_fmt.finish()
- };
-
- match **kind {
- AggregateKind::Array(_) => write!(fmt, "{places:?}"),
-
- AggregateKind::Tuple => {
- if places.is_empty() {
- write!(fmt, "()")
- } else {
- fmt_tuple(fmt, "")
- }
- }
-
- AggregateKind::Adt(adt_did, variant, args, _user_ty, _) => {
- ty::tls::with(|tcx| {
- let variant_def = &tcx.adt_def(adt_did).variant(variant);
- let args = tcx.lift(args).expect("could not lift for printing");
- let name = FmtPrinter::new(tcx, Namespace::ValueNS)
- .print_def_path(variant_def.def_id, args)?
- .into_buffer();
-
- match variant_def.ctor_kind() {
- Some(CtorKind::Const) => fmt.write_str(&name),
- Some(CtorKind::Fn) => fmt_tuple(fmt, &name),
- None => {
- let mut struct_fmt = fmt.debug_struct(&name);
- for (field, place) in iter::zip(&variant_def.fields, places) {
- struct_fmt.field(field.name.as_str(), place);
- }
- struct_fmt.finish()
- }
- }
- })
- }
-
- AggregateKind::Closure(def_id, args) => ty::tls::with(|tcx| {
- let name = if tcx.sess.opts.unstable_opts.span_free_formats {
- let args = tcx.lift(args).unwrap();
- format!("[closure@{}]", tcx.def_path_str_with_args(def_id, args),)
- } else {
- let span = tcx.def_span(def_id);
- format!(
- "[closure@{}]",
- tcx.sess.source_map().span_to_diagnostic_string(span)
- )
- };
- let mut struct_fmt = fmt.debug_struct(&name);
-
- // FIXME(project-rfc-2229#48): This should be a list of capture names/places
- if let Some(def_id) = def_id.as_local()
- && let Some(upvars) = tcx.upvars_mentioned(def_id)
- {
- for (&var_id, place) in iter::zip(upvars.keys(), places) {
- let var_name = tcx.hir().name(var_id);
- struct_fmt.field(var_name.as_str(), place);
- }
- } else {
- for (index, place) in places.iter().enumerate() {
- struct_fmt.field(&format!("{index}"), place);
- }
- }
-
- struct_fmt.finish()
- }),
-
- AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| {
- let name = format!("[generator@{:?}]", tcx.def_span(def_id));
- let mut struct_fmt = fmt.debug_struct(&name);
-
- // FIXME(project-rfc-2229#48): This should be a list of capture names/places
- if let Some(def_id) = def_id.as_local()
- && let Some(upvars) = tcx.upvars_mentioned(def_id)
- {
- for (&var_id, place) in iter::zip(upvars.keys(), places) {
- let var_name = tcx.hir().name(var_id);
- struct_fmt.field(var_name.as_str(), place);
- }
- } else {
- for (index, place) in places.iter().enumerate() {
- struct_fmt.field(&format!("{index}"), place);
- }
- }
-
- struct_fmt.finish()
- }),
- }
- }
-
- ShallowInitBox(ref place, ref ty) => {
- write!(fmt, "ShallowInitBox({place:?}, {ty:?})")
- }
- }
- }
-}
-
-///////////////////////////////////////////////////////////////////////////
-/// Constants
-///
-/// Two constants are equal if they are the same constant. Note that
-/// this does not necessarily mean that they are `==` in Rust. In
-/// particular, one must be wary of `NaN`!
-
-#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
-#[derive(TypeFoldable, TypeVisitable)]
-pub struct Constant<'tcx> {
- pub span: Span,
-
- /// Optional user-given type: for something like
- /// `collect::<Vec<_>>`, this would be present and would
- /// indicate that `Vec<_>` was explicitly specified.
- ///
- /// Needed for NLL to impose user-given type constraints.
- pub user_ty: Option<UserTypeAnnotationIndex>,
-
- pub literal: ConstantKind<'tcx>,
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
-#[derive(Lift, TypeFoldable, TypeVisitable)]
-pub enum ConstantKind<'tcx> {
- /// This constant came from the type system
- Ty(ty::Const<'tcx>),
-
- /// An unevaluated mir constant which is not part of the type system.
- Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>),
-
- /// This constant cannot go back into the type system, as it represents
- /// something the type system cannot handle (e.g. pointers).
- Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
-}
-
-impl<'tcx> Constant<'tcx> {
- pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
- match self.literal.try_to_scalar() {
- Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
- GlobalAlloc::Static(def_id) => {
- assert!(!tcx.is_thread_local_static(def_id));
- Some(def_id)
- }
- _ => None,
- },
- _ => None,
- }
- }
- #[inline]
- pub fn ty(&self) -> Ty<'tcx> {
- self.literal.ty()
- }
-}
-
-impl<'tcx> ConstantKind<'tcx> {
- #[inline(always)]
- pub fn ty(&self) -> Ty<'tcx> {
- match self {
- ConstantKind::Ty(c) => c.ty(),
- ConstantKind::Val(_, ty) | ConstantKind::Unevaluated(_, ty) => *ty,
- }
- }
-
- #[inline]
- pub fn try_to_value(self, tcx: TyCtxt<'tcx>) -> Option<interpret::ConstValue<'tcx>> {
- match self {
- ConstantKind::Ty(c) => match c.kind() {
- ty::ConstKind::Value(valtree) => Some(tcx.valtree_to_const_val((c.ty(), valtree))),
- _ => None,
- },
- ConstantKind::Val(val, _) => Some(val),
- ConstantKind::Unevaluated(..) => None,
- }
- }
-
- #[inline]
- pub fn try_to_scalar(self) -> Option<Scalar> {
- match self {
- ConstantKind::Ty(c) => match c.kind() {
- ty::ConstKind::Value(valtree) => match valtree {
- ty::ValTree::Leaf(scalar_int) => Some(Scalar::Int(scalar_int)),
- ty::ValTree::Branch(_) => None,
- },
- _ => None,
- },
- ConstantKind::Val(val, _) => val.try_to_scalar(),
- ConstantKind::Unevaluated(..) => None,
- }
- }
-
- #[inline]
- pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
- Some(self.try_to_scalar()?.assert_int())
- }
-
- #[inline]
- pub fn try_to_bits(self, size: Size) -> Option<u128> {
- self.try_to_scalar_int()?.to_bits(size).ok()
- }
-
- #[inline]
- pub fn try_to_bool(self) -> Option<bool> {
- self.try_to_scalar_int()?.try_into().ok()
- }
-
- #[inline]
- pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
- match self {
- Self::Ty(c) => {
- if let Some(val) = c.try_eval_for_mir(tcx, param_env) {
- match val {
- Ok(val) => Self::Val(val, c.ty()),
- Err(guar) => Self::Ty(ty::Const::new_error(tcx, guar, self.ty())),
- }
- } else {
- self
- }
- }
- Self::Val(_, _) => self,
- Self::Unevaluated(uneval, ty) => {
- // FIXME: We might want to have a `try_eval`-like function on `Unevaluated`
- match tcx.const_eval_resolve(param_env, uneval, None) {
- Ok(val) => Self::Val(val, ty),
- Err(ErrorHandled::TooGeneric) => self,
- Err(ErrorHandled::Reported(guar)) => {
- Self::Ty(ty::Const::new_error(tcx, guar.into(), ty))
- }
- }
- }
- }
- }
-
- /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
- #[inline]
- pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
- self.try_eval_bits(tcx, param_env, ty)
- .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
- }
-
- #[inline]
- pub fn try_eval_bits(
- &self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ty: Ty<'tcx>,
- ) -> Option<u128> {
- match self {
- Self::Ty(ct) => ct.try_eval_bits(tcx, param_env, ty),
- Self::Val(val, t) => {
- assert_eq!(*t, ty);
- let size =
- tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
- val.try_to_bits(size)
- }
- Self::Unevaluated(uneval, ty) => {
- match tcx.const_eval_resolve(param_env, *uneval, None) {
- Ok(val) => {
- let size = tcx
- .layout_of(param_env.with_reveal_all_normalized(tcx).and(*ty))
- .ok()?
- .size;
- val.try_to_bits(size)
- }
- Err(_) => None,
- }
- }
- }
- }
-
- #[inline]
- pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
- match self {
- Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
- Self::Val(val, _) => val.try_to_bool(),
- Self::Unevaluated(uneval, _) => {
- match tcx.const_eval_resolve(param_env, *uneval, None) {
- Ok(val) => val.try_to_bool(),
- Err(_) => None,
- }
- }
- }
- }
-
- #[inline]
- pub fn try_eval_target_usize(
- &self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> Option<u64> {
- match self {
- Self::Ty(ct) => ct.try_eval_target_usize(tcx, param_env),
- Self::Val(val, _) => val.try_to_target_usize(tcx),
- Self::Unevaluated(uneval, _) => {
- match tcx.const_eval_resolve(param_env, *uneval, None) {
- Ok(val) => val.try_to_target_usize(tcx),
- Err(_) => None,
- }
- }
- }
- }
-
- #[inline]
- pub fn from_value(val: ConstValue<'tcx>, ty: Ty<'tcx>) -> Self {
- Self::Val(val, ty)
- }
-
- pub fn from_bits(
- tcx: TyCtxt<'tcx>,
- bits: u128,
- param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
- ) -> Self {
- let size = tcx
- .layout_of(param_env_ty)
- .unwrap_or_else(|e| {
- bug!("could not compute layout for {:?}: {:?}", param_env_ty.value, e)
- })
- .size;
- let cv = ConstValue::Scalar(Scalar::from_uint(bits, size));
-
- Self::Val(cv, param_env_ty.value)
- }
-
- #[inline]
- pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
- let cv = ConstValue::from_bool(v);
- Self::Val(cv, tcx.types.bool)
- }
-
- #[inline]
- pub fn zero_sized(ty: Ty<'tcx>) -> Self {
- let cv = ConstValue::ZeroSized;
- Self::Val(cv, ty)
- }
-
- pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
- let ty = tcx.types.usize;
- Self::from_bits(tcx, n as u128, ty::ParamEnv::empty().and(ty))
- }
-
- #[inline]
- pub fn from_scalar(_tcx: TyCtxt<'tcx>, s: Scalar, ty: Ty<'tcx>) -> Self {
- let val = ConstValue::Scalar(s);
- Self::Val(val, ty)
- }
-
- /// Literals are converted to `ConstantKind::Val`, const generic parameters are eagerly
- /// converted to a constant, everything else becomes `Unevaluated`.
- #[instrument(skip(tcx), level = "debug", ret)]
- pub fn from_anon_const(
- tcx: TyCtxt<'tcx>,
- def: LocalDefId,
- param_env: ty::ParamEnv<'tcx>,
- ) -> Self {
- let body_id = match tcx.hir().get_by_def_id(def) {
- hir::Node::AnonConst(ac) => ac.body,
- _ => {
- span_bug!(tcx.def_span(def), "from_anon_const can only process anonymous constants")
- }
- };
-
- let expr = &tcx.hir().body(body_id).value;
- debug!(?expr);
-
- // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
- // currently have to be wrapped in curly brackets, so it's necessary to special-case.
- let expr = match &expr.kind {
- hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
- block.expr.as_ref().unwrap()
- }
- _ => expr,
- };
- debug!("expr.kind: {:?}", expr.kind);
-
- let ty = tcx.type_of(def).instantiate_identity();
- debug!(?ty);
-
- // FIXME(const_generics): We currently have to special case parameters because `min_const_generics`
- // does not provide the parents generics to anonymous constants. We still allow generic const
- // parameters by themselves however, e.g. `N`. These constants would cause an ICE if we were to
- // ever try to substitute the generic parameters in their bodies.
- //
- // While this doesn't happen as these constants are always used as `ty::ConstKind::Param`, it does
- // cause issues if we were to remove that special-case and try to evaluate the constant instead.
- use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
- match expr.kind {
- ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
- // Find the name and index of the const parameter by indexing the generics of
- // the parent item and construct a `ParamConst`.
- let item_def_id = tcx.parent(def_id);
- let generics = tcx.generics_of(item_def_id);
- let index = generics.param_def_id_to_index[&def_id];
- let name = tcx.item_name(def_id);
- let ty_const = ty::Const::new_param(tcx, ty::ParamConst::new(index, name), ty);
- debug!(?ty_const);
-
- return Self::Ty(ty_const);
- }
- _ => {}
- }
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def);
- let parent_args = if let Some(parent_hir_id) = tcx.hir().opt_parent_id(hir_id)
- && let Some(parent_did) = parent_hir_id.as_owner()
- {
- GenericArgs::identity_for_item(tcx, parent_did)
- } else {
- List::empty()
- };
- debug!(?parent_args);
-
- let did = def.to_def_id();
- let child_args = GenericArgs::identity_for_item(tcx, did);
- let args = tcx.mk_args_from_iter(parent_args.into_iter().chain(child_args.into_iter()));
- debug!(?args);
-
- let span = tcx.def_span(def);
- let uneval = UnevaluatedConst::new(did, args);
- debug!(?span, ?param_env);
-
- match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
- Ok(val) => {
- debug!("evaluated const value");
- Self::Val(val, ty)
- }
- Err(_) => {
- debug!("error encountered during evaluation");
- // Error was handled in `const_eval_resolve`. Here we just create a
- // new unevaluated const and error hard later in codegen
- Self::Unevaluated(
- UnevaluatedConst {
- def: did,
- args: GenericArgs::identity_for_item(tcx, did),
- promoted: None,
- },
- ty,
- )
- }
- }
- }
-
- pub fn from_const(c: ty::Const<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
- match c.kind() {
- ty::ConstKind::Value(valtree) => {
- let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
- Self::Val(const_val, c.ty())
- }
- ty::ConstKind::Unevaluated(uv) => Self::Unevaluated(uv.expand(), c.ty()),
- _ => Self::Ty(c),
- }
- }
-}
-
-/// An unevaluated (potentially generic) constant used in MIR.
-#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
-#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
-pub struct UnevaluatedConst<'tcx> {
- pub def: DefId,
- pub args: GenericArgsRef<'tcx>,
- pub promoted: Option<Promoted>,
-}
-
-impl<'tcx> UnevaluatedConst<'tcx> {
- #[inline]
- pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
- assert_eq!(self.promoted, None);
- ty::UnevaluatedConst { def: self.def, args: self.args }
- }
-}
-
-impl<'tcx> UnevaluatedConst<'tcx> {
- #[inline]
- pub fn new(def: DefId, args: GenericArgsRef<'tcx>) -> UnevaluatedConst<'tcx> {
- UnevaluatedConst { def, args, promoted: Default::default() }
- }
-}
-
/// A collection of projections into user types.
///
/// They are projections because a binding can occur as part of a
/// parent pattern that has been ascribed a type.
///
-/// Its a collection because there can be multiple type ascriptions on
+/// It's a collection because there can be multiple type ascriptions on
/// the path from the root of the pattern down to the binding itself.
///
/// An example:
@@ -2747,220 +1545,6 @@ rustc_index::newtype_index! {
pub struct Promoted {}
}
-impl<'tcx> Debug for Constant<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- write!(fmt, "{self}")
- }
-}
-
-impl<'tcx> Display for Constant<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- match self.ty().kind() {
- ty::FnDef(..) => {}
- _ => write!(fmt, "const ")?,
- }
- Display::fmt(&self.literal, fmt)
- }
-}
-
-impl<'tcx> Display for ConstantKind<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- match *self {
- ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
- ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt),
- // FIXME(valtrees): Correctly print mir constants.
- ConstantKind::Unevaluated(..) => {
- fmt.write_str("_")?;
- Ok(())
- }
- }
- }
-}
-
-fn pretty_print_const<'tcx>(
- c: ty::Const<'tcx>,
- fmt: &mut Formatter<'_>,
- print_types: bool,
-) -> fmt::Result {
- use crate::ty::print::PrettyPrinter;
- ty::tls::with(|tcx| {
- let literal = tcx.lift(c).unwrap();
- let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- cx.print_alloc_ids = true;
- let cx = cx.pretty_print_const(literal, print_types)?;
- fmt.write_str(&cx.into_buffer())?;
- Ok(())
- })
-}
-
-fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
- write!(fmt, "b\"{}\"", byte_str.escape_ascii())
-}
-
-fn comma_sep<'tcx>(
- fmt: &mut Formatter<'_>,
- elems: Vec<(ConstValue<'tcx>, Ty<'tcx>)>,
-) -> fmt::Result {
- let mut first = true;
- for (ct, ty) in elems {
- if !first {
- fmt.write_str(", ")?;
- }
- pretty_print_const_value(ct, ty, fmt)?;
- first = false;
- }
- Ok(())
-}
-
-// FIXME: Move that into `mir/pretty.rs`.
-fn pretty_print_const_value<'tcx>(
- ct: ConstValue<'tcx>,
- ty: Ty<'tcx>,
- fmt: &mut Formatter<'_>,
-) -> fmt::Result {
- use crate::ty::print::PrettyPrinter;
-
- ty::tls::with(|tcx| {
- let ct = tcx.lift(ct).unwrap();
- let ty = tcx.lift(ty).unwrap();
-
- if tcx.sess.verbose() {
- fmt.write_str(&format!("ConstValue({ct:?}: {ty})"))?;
- return Ok(());
- }
-
- let u8_type = tcx.types.u8;
- match (ct, ty.kind()) {
- // Byte/string slices, printed as (byte) string literals.
- (ConstValue::Slice { data, start, end }, ty::Ref(_, inner, _)) => {
- match inner.kind() {
- ty::Slice(t) => {
- if *t == u8_type {
- // The `inspect` here is okay since we checked the bounds, and `u8` carries
- // no provenance (we have an active slice reference here). We don't use
- // this result to affect interpreter execution.
- let byte_str = data
- .inner()
- .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
- pretty_print_byte_str(fmt, byte_str)?;
- return Ok(());
- }
- }
- ty::Str => {
- // The `inspect` here is okay since we checked the bounds, and `str` carries
- // no provenance (we have an active `str` reference here). We don't use this
- // result to affect interpreter execution.
- let slice = data
- .inner()
- .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
- fmt.write_str(&format!("{:?}", String::from_utf8_lossy(slice)))?;
- return Ok(());
- }
- _ => {}
- }
- }
- (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
- let n = n.try_to_bits(tcx.data_layout.pointer_size).unwrap();
- // cast is ok because we already checked for pointer size (32 or 64 bit) above
- let range = AllocRange { start: offset, size: Size::from_bytes(n) };
- let byte_str = alloc.inner().get_bytes_strip_provenance(&tcx, range).unwrap();
- fmt.write_str("*")?;
- pretty_print_byte_str(fmt, byte_str)?;
- return Ok(());
- }
- // Aggregates, printed as array/tuple/struct/variant construction syntax.
- //
- // NB: the `has_non_region_param` check ensures that we can use
- // the `destructure_const` query with an empty `ty::ParamEnv` without
- // introducing ICEs (e.g. via `layout_of`) from missing bounds.
- // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
- // to be able to destructure the tuple into `(0u8, *mut T)`
- (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_non_region_param() => {
- let ct = tcx.lift(ct).unwrap();
- let ty = tcx.lift(ty).unwrap();
- if let Some(contents) = tcx.try_destructure_mir_constant_for_diagnostics((ct, ty)) {
- let fields: Vec<(ConstValue<'_>, Ty<'_>)> = contents.fields.to_vec();
- match *ty.kind() {
- ty::Array(..) => {
- fmt.write_str("[")?;
- comma_sep(fmt, fields)?;
- fmt.write_str("]")?;
- }
- ty::Tuple(..) => {
- fmt.write_str("(")?;
- comma_sep(fmt, fields)?;
- if contents.fields.len() == 1 {
- fmt.write_str(",")?;
- }
- fmt.write_str(")")?;
- }
- ty::Adt(def, _) if def.variants().is_empty() => {
- fmt.write_str(&format!("{{unreachable(): {ty}}}"))?;
- }
- ty::Adt(def, args) => {
- let variant_idx = contents
- .variant
- .expect("destructed mir constant of adt without variant idx");
- let variant_def = &def.variant(variant_idx);
- let args = tcx.lift(args).unwrap();
- let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- cx.print_alloc_ids = true;
- let cx = cx.print_value_path(variant_def.def_id, args)?;
- fmt.write_str(&cx.into_buffer())?;
-
- match variant_def.ctor_kind() {
- Some(CtorKind::Const) => {}
- Some(CtorKind::Fn) => {
- fmt.write_str("(")?;
- comma_sep(fmt, fields)?;
- fmt.write_str(")")?;
- }
- None => {
- fmt.write_str(" {{ ")?;
- let mut first = true;
- for (field_def, (ct, ty)) in
- iter::zip(&variant_def.fields, fields)
- {
- if !first {
- fmt.write_str(", ")?;
- }
- write!(fmt, "{}: ", field_def.name)?;
- pretty_print_const_value(ct, ty, fmt)?;
- first = false;
- }
- fmt.write_str(" }}")?;
- }
- }
- }
- _ => unreachable!(),
- }
- return Ok(());
- }
- }
- (ConstValue::Scalar(scalar), _) => {
- let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- cx.print_alloc_ids = true;
- let ty = tcx.lift(ty).unwrap();
- cx = cx.pretty_print_const_scalar(scalar, ty)?;
- fmt.write_str(&cx.into_buffer())?;
- return Ok(());
- }
- (ConstValue::ZeroSized, ty::FnDef(d, s)) => {
- let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
- cx.print_alloc_ids = true;
- let cx = cx.print_value_path(*d, s)?;
- fmt.write_str(&cx.into_buffer())?;
- return Ok(());
- }
- // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
- // their fields instead of just dumping the memory.
- _ => {}
- }
- // Fall back to debug pretty printing for invalid constants.
- write!(fmt, "{ct:?}: {ty}")
- })
-}
-
/// `Location` represents the position of the start of the statement; or, if
/// `statement_index` equals the number of statements, then the start of the
/// terminator.
@@ -3043,6 +1627,6 @@ mod size_asserts {
static_assert_size!(StatementKind<'_>, 16);
static_assert_size!(Terminator<'_>, 104);
static_assert_size!(TerminatorKind<'_>, 88);
- static_assert_size!(VarDebugInfo<'_>, 80);
+ static_assert_size!(VarDebugInfo<'_>, 88);
// tidy-alphabetical-end
}
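
The hunks above remove `Constant`, `ConstantKind` and their pretty-printing from `mir/mod.rs`; later in this diff (the `pretty.rs` changes) the same machinery reappears under the 1.74 names `ConstOperand` and `Const`. As a rough reference for the pre-1.74 helpers being deleted, here is a minimal sketch, not part of the commit, assuming the usual `rustc_middle` paths and a `TyCtxt` obtained from a compiler context:

use rustc_middle::mir::ConstantKind;
use rustc_middle::ty::{self, TyCtxt};

// Illustrative only: wrap a target-usize value as a MIR constant and then
// evaluate it back, using the two helpers removed in the hunk above.
fn roundtrip_usize<'tcx>(tcx: TyCtxt<'tcx>) -> Option<u64> {
    // `from_usize` builds a `ConstantKind::Val(..)` of type `usize`.
    let c = ConstantKind::from_usize(tcx, 42);
    // `try_eval_target_usize` returns `None` for unevaluated or overly
    // generic constants, otherwise the concrete value.
    c.try_eval_target_usize(tcx, ty::ParamEnv::reveal_all())
}
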
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index 8fd980d5a..403e80bd3 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -78,9 +78,11 @@ impl<'tcx> MonoItem<'tcx> {
}
}
- pub fn is_generic_fn(&self) -> bool {
- match *self {
- MonoItem::Fn(ref instance) => instance.args.non_erasable_generics().next().is_some(),
+ pub fn is_generic_fn(&self, tcx: TyCtxt<'tcx>) -> bool {
+ match self {
+ MonoItem::Fn(instance) => {
+ instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some()
+ }
MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false,
}
}
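
In the `mono.rs` hunk above, `MonoItem::is_generic_fn` gains a `TyCtxt` parameter because `non_erasable_generics` now takes the context and the callee's `DefId`. A hedged sketch of what a caller looks like after the change; only the `is_generic_fn(tcx)` call reflects the diff, while the function name and rationale are illustrative:

use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::TyCtxt;

// Generic fns can be instantiated from other crates, so a partitioning-style
// check might treat them differently from statics and global asm.
fn needs_shared_instantiation<'tcx>(tcx: TyCtxt<'tcx>, item: &MonoItem<'tcx>) -> bool {
    item.is_generic_fn(tcx)
}
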
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
index c4c3341f8..da486c346 100644
--- a/compiler/rustc_middle/src/mir/patch.rs
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -14,7 +14,8 @@ pub struct MirPatch<'tcx> {
resume_block: Option<BasicBlock>,
// Only for unreachable in cleanup path.
unreachable_cleanup_block: Option<BasicBlock>,
- terminate_block: Option<BasicBlock>,
+ // Cached block for UnwindTerminate (with reason)
+ terminate_block: Option<(BasicBlock, UnwindTerminateReason)>,
body_span: Span,
next_local: usize,
}
@@ -35,13 +36,15 @@ impl<'tcx> MirPatch<'tcx> {
for (bb, block) in body.basic_blocks.iter_enumerated() {
// Check if we already have a resume block
- if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
+ if matches!(block.terminator().kind, TerminatorKind::UnwindResume)
+ && block.statements.is_empty()
+ {
result.resume_block = Some(bb);
continue;
}
// Check if we already have an unreachable block
- if let TerminatorKind::Unreachable = block.terminator().kind
+ if matches!(block.terminator().kind, TerminatorKind::Unreachable)
&& block.statements.is_empty()
&& block.is_cleanup
{
@@ -50,8 +53,10 @@ impl<'tcx> MirPatch<'tcx> {
}
// Check if we already have a terminate block
- if let TerminatorKind::Terminate = block.terminator().kind && block.statements.is_empty() {
- result.terminate_block = Some(bb);
+ if let TerminatorKind::UnwindTerminate(reason) = block.terminator().kind
+ && block.statements.is_empty()
+ {
+ result.terminate_block = Some((bb, reason));
continue;
}
}
@@ -68,7 +73,7 @@ impl<'tcx> MirPatch<'tcx> {
statements: vec![],
terminator: Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
- kind: TerminatorKind::Resume,
+ kind: TerminatorKind::UnwindResume,
}),
is_cleanup: true,
});
@@ -93,20 +98,20 @@ impl<'tcx> MirPatch<'tcx> {
bb
}
- pub fn terminate_block(&mut self) -> BasicBlock {
- if let Some(bb) = self.terminate_block {
- return bb;
+ pub fn terminate_block(&mut self, reason: UnwindTerminateReason) -> BasicBlock {
+ if let Some((cached_bb, cached_reason)) = self.terminate_block && reason == cached_reason {
+ return cached_bb;
}
let bb = self.new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
source_info: SourceInfo::outermost(self.body_span),
- kind: TerminatorKind::Terminate,
+ kind: TerminatorKind::UnwindTerminate(reason),
}),
is_cleanup: true,
});
- self.terminate_block = Some(bb);
+ self.terminate_block = Some((bb, reason));
bb
}
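
After the `patch.rs` changes above, `MirPatch` keeps a single cached terminate block tagged with its `UnwindTerminateReason`; the block is reused only when a caller asks for the same reason, otherwise a fresh one is created and becomes the new cache entry. A sketch of a caller under the new signature; only `MirPatch::terminate_block` comes from the hunk, and the wrapper plus the choice of `UnwindTerminateReason::InCleanup` are illustrative:

use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::{UnwindAction, UnwindTerminateReason};

// Illustrative only: route an unwind edge to a cleanup block that ends in
// `TerminatorKind::UnwindTerminate(InCleanup)`.
fn terminate_on_unwind<'tcx>(patch: &mut MirPatch<'tcx>) -> UnwindAction {
    let bb = patch.terminate_block(UnwindTerminateReason::InCleanup);
    UnwindAction::Cleanup(bb)
}
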
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 773056e8a..f032fd29d 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -1,19 +1,19 @@
use std::collections::BTreeSet;
-use std::fmt::Display;
-use std::fmt::Write as _;
+use std::fmt::{self, Debug, Display, Write as _};
use std::fs;
-use std::io::{self, Write};
+use std::io::{self, Write as _};
use std::path::{Path, PathBuf};
use super::graphviz::write_mir_fn_graphviz;
use super::spanview::write_mir_fn_spanview;
use either::Either;
+use rustc_ast::InlineAsmTemplatePiece;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
use rustc_index::Idx;
use rustc_middle::mir::interpret::{
- alloc_range, read_target_uint, AllocBytes, AllocId, Allocation, ConstAllocation, ConstValue,
- GlobalAlloc, Pointer, Provenance,
+ alloc_range, read_target_uint, AllocBytes, AllocId, Allocation, ConstAllocation, GlobalAlloc,
+ Pointer, Provenance,
};
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
@@ -79,7 +79,7 @@ pub fn dump_mir<'tcx, F>(
body: &Body<'tcx>,
extra_data: F,
) where
- F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+ F: FnMut(PassWhere, &mut dyn io::Write) -> io::Result<()>,
{
if !dump_enabled(tcx, pass_name, body.source.def_id()) {
return;
@@ -116,7 +116,7 @@ fn dump_matched_mir_node<'tcx, F>(
body: &Body<'tcx>,
mut extra_data: F,
) where
- F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+ F: FnMut(PassWhere, &mut dyn io::Write) -> io::Result<()>,
{
let _: io::Result<()> = try {
let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body)?;
@@ -260,11 +260,14 @@ pub fn create_dump_file<'tcx>(
)
}
+///////////////////////////////////////////////////////////////////////////
+// Whole MIR bodies
+
/// Write out a human-readable textual representation for the given MIR.
pub fn write_mir_pretty<'tcx>(
tcx: TyCtxt<'tcx>,
single: Option<DefId>,
- w: &mut dyn Write,
+ w: &mut dyn io::Write,
) -> io::Result<()> {
writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
@@ -278,7 +281,7 @@ pub fn write_mir_pretty<'tcx>(
writeln!(w)?;
}
- let render_body = |w: &mut dyn Write, body| -> io::Result<()> {
+ let render_body = |w: &mut dyn io::Write, body| -> io::Result<()> {
write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
for body in tcx.promoted_mir(def_id) {
@@ -309,10 +312,10 @@ pub fn write_mir_fn<'tcx, F>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
extra_data: &mut F,
- w: &mut dyn Write,
+ w: &mut dyn io::Write,
) -> io::Result<()>
where
- F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+ F: FnMut(PassWhere, &mut dyn io::Write) -> io::Result<()>,
{
write_mir_intro(tcx, body, w)?;
for block in body.basic_blocks.indices() {
@@ -330,16 +333,267 @@ where
Ok(())
}
+/// Prints local variables in a scope tree.
+fn write_scope_tree(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
+ w: &mut dyn io::Write,
+ parent: SourceScope,
+ depth: usize,
+) -> io::Result<()> {
+ let indent = depth * INDENT.len();
+
+ // Local variable debuginfo.
+ for var_debug_info in &body.var_debug_info {
+ if var_debug_info.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let indented_debug_info = format!("{0:1$}debug {2:?};", INDENT, indent, var_debug_info);
+
+ if tcx.sess.opts.unstable_opts.mir_include_spans {
+ writeln!(
+ w,
+ "{0:1$} // in {2}",
+ indented_debug_info,
+ ALIGN,
+ comment(tcx, var_debug_info.source_info),
+ )?;
+ } else {
+ writeln!(w, "{indented_debug_info}")?;
+ }
+ }
+
+ // Local variable types.
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ if (1..body.arg_count + 1).contains(&local.index()) {
+ // Skip over argument locals, they're printed in the signature.
+ continue;
+ }
+
+ if local_decl.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let mut_str = local_decl.mutability.prefix_str();
+
+ let mut indented_decl = ty::print::with_no_trimmed_paths!(format!(
+ "{0:1$}let {2}{3:?}: {4}",
+ INDENT, indent, mut_str, local, local_decl.ty
+ ));
+ if let Some(user_ty) = &local_decl.user_ty {
+ for user_ty in user_ty.projections() {
+ write!(indented_decl, " as {user_ty:?}").unwrap();
+ }
+ }
+ indented_decl.push(';');
+
+ let local_name = if local == RETURN_PLACE { " return place" } else { "" };
+
+ if tcx.sess.opts.unstable_opts.mir_include_spans {
+ writeln!(
+ w,
+ "{0:1$} //{2} in {3}",
+ indented_decl,
+ ALIGN,
+ local_name,
+ comment(tcx, local_decl.source_info),
+ )?;
+ } else {
+ writeln!(w, "{indented_decl}",)?;
+ }
+ }
+
+ let Some(children) = scope_tree.get(&parent) else {
+ return Ok(());
+ };
+
+ for &child in children {
+ let child_data = &body.source_scopes[child];
+ assert_eq!(child_data.parent_scope, Some(parent));
+
+ let (special, span) = if let Some((callee, callsite_span)) = child_data.inlined {
+ (
+ format!(
+ " (inlined {}{})",
+ if callee.def.requires_caller_location(tcx) { "#[track_caller] " } else { "" },
+ callee
+ ),
+ Some(callsite_span),
+ )
+ } else {
+ (String::new(), None)
+ };
+
+ let indented_header = format!("{0:1$}scope {2}{3} {{", "", indent, child.index(), special);
+
+ if tcx.sess.opts.unstable_opts.mir_include_spans {
+ if let Some(span) = span {
+ writeln!(
+ w,
+ "{0:1$} // at {2}",
+ indented_header,
+ ALIGN,
+ tcx.sess.source_map().span_to_embeddable_string(span),
+ )?;
+ } else {
+ writeln!(w, "{indented_header}")?;
+ }
+ } else {
+ writeln!(w, "{indented_header}")?;
+ }
+
+ write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
+ writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
+ }
+
+ Ok(())
+}
+
+impl Debug for VarDebugInfo<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ if let Some(box VarDebugInfoFragment { ty, ref projection }) = self.composite {
+ pre_fmt_projection(&projection[..], fmt)?;
+ write!(fmt, "({}: {})", self.name, ty)?;
+ post_fmt_projection(&projection[..], fmt)?;
+ } else {
+ write!(fmt, "{}", self.name)?;
+ }
+
+ write!(fmt, " => {:?}", self.value)
+ }
+}
+
+/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
+/// local variables (both user-defined bindings and compiler temporaries).
+pub fn write_mir_intro<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut dyn io::Write,
+) -> io::Result<()> {
+ write_mir_sig(tcx, body, w)?;
+ writeln!(w, "{{")?;
+
+ // construct a scope tree and write it out
+ let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
+ for (index, scope_data) in body.source_scopes.iter().enumerate() {
+ if let Some(parent) = scope_data.parent_scope {
+ scope_tree.entry(parent).or_default().push(SourceScope::new(index));
+ } else {
+ // Only the argument scope has no parent, because it's the root.
+ assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
+ }
+ }
+
+ write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+
+ // Add an empty line before the first block is printed.
+ writeln!(w)?;
+
+ Ok(())
+}
+
+fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn io::Write) -> io::Result<()> {
+ use rustc_hir::def::DefKind;
+
+ trace!("write_mir_sig: {:?}", body.source.instance);
+ let def_id = body.source.def_id();
+ let kind = tcx.def_kind(def_id);
+ let is_function = match kind {
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
+ _ => tcx.is_closure(def_id),
+ };
+ match (kind, body.source.promoted) {
+ (_, Some(i)) => write!(w, "{i:?} in ")?,
+ (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
+ (DefKind::Static(hir::Mutability::Not), _) => write!(w, "static ")?,
+ (DefKind::Static(hir::Mutability::Mut), _) => write!(w, "static mut ")?,
+ (_, _) if is_function => write!(w, "fn ")?,
+ (DefKind::AnonConst | DefKind::InlineConst, _) => {} // things like anon const, not an item
+ _ => bug!("Unexpected def kind {:?}", kind),
+ }
+
+ ty::print::with_forced_impl_filename_line! {
+ // see notes on #41697 elsewhere
+ write!(w, "{}", tcx.def_path_str(def_id))?
+ }
+
+ if body.source.promoted.is_none() && is_function {
+ write!(w, "(")?;
+
+ // fn argument types.
+ for (i, arg) in body.args_iter().enumerate() {
+ if i != 0 {
+ write!(w, ", ")?;
+ }
+ write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
+ }
+
+ write!(w, ") -> {}", body.return_ty())?;
+ } else {
+ assert_eq!(body.arg_count, 0);
+ write!(w, ": {} =", body.return_ty())?;
+ }
+
+ if let Some(yield_ty) = body.yield_ty() {
+ writeln!(w)?;
+ writeln!(w, "yields {yield_ty}")?;
+ }
+
+ write!(w, " ")?;
+ // Next thing that gets printed is the opening {
+
+ Ok(())
+}
+
+fn write_user_type_annotations(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ w: &mut dyn io::Write,
+) -> io::Result<()> {
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "| User Type Annotations")?;
+ }
+ for (index, annotation) in body.user_type_annotations.iter_enumerated() {
+ writeln!(
+ w,
+ "| {:?}: user_ty: {}, span: {}, inferred_ty: {}",
+ index.index(),
+ annotation.user_ty,
+ tcx.sess.source_map().span_to_embeddable_string(annotation.span),
+ with_no_trimmed_paths!(format!("{}", annotation.inferred_ty)),
+ )?;
+ }
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "|")?;
+ }
+ Ok(())
+}
+
+pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
+ if let Some(i) = single {
+ vec![i]
+ } else {
+ tcx.mir_keys(()).iter().map(|def_id| def_id.to_def_id()).collect()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Basic blocks and their parts (statements, terminators, ...)
+
/// Write out a human-readable textual representation for the given basic block.
pub fn write_basic_block<'tcx, F>(
tcx: TyCtxt<'tcx>,
block: BasicBlock,
body: &Body<'tcx>,
extra_data: &mut F,
- w: &mut dyn Write,
+ w: &mut dyn io::Write,
) -> io::Result<()>
where
- F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+ F: FnMut(PassWhere, &mut dyn io::Write) -> io::Result<()>,
{
let data = &body[block];
@@ -400,10 +654,528 @@ where
writeln!(w, "{INDENT}}}")
}
+impl Debug for Statement<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::StatementKind::*;
+ match self.kind {
+ Assign(box (ref place, ref rv)) => write!(fmt, "{place:?} = {rv:?}"),
+ FakeRead(box (ref cause, ref place)) => {
+ write!(fmt, "FakeRead({cause:?}, {place:?})")
+ }
+ Retag(ref kind, ref place) => write!(
+ fmt,
+ "Retag({}{:?})",
+ match kind {
+ RetagKind::FnEntry => "[fn entry] ",
+ RetagKind::TwoPhase => "[2phase] ",
+ RetagKind::Raw => "[raw] ",
+ RetagKind::Default => "",
+ },
+ place,
+ ),
+ StorageLive(ref place) => write!(fmt, "StorageLive({place:?})"),
+ StorageDead(ref place) => write!(fmt, "StorageDead({place:?})"),
+ SetDiscriminant { ref place, variant_index } => {
+ write!(fmt, "discriminant({place:?}) = {variant_index:?}")
+ }
+ Deinit(ref place) => write!(fmt, "Deinit({place:?})"),
+ PlaceMention(ref place) => {
+ write!(fmt, "PlaceMention({place:?})")
+ }
+ AscribeUserType(box (ref place, ref c_ty), ref variance) => {
+ write!(fmt, "AscribeUserType({place:?}, {variance:?}, {c_ty:?})")
+ }
+ Coverage(box self::Coverage { ref kind, code_region: Some(ref rgn) }) => {
+ write!(fmt, "Coverage::{kind:?} for {rgn:?}")
+ }
+ Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
+ Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"),
+ ConstEvalCounter => write!(fmt, "ConstEvalCounter"),
+ Nop => write!(fmt, "nop"),
+ }
+ }
+}
+
+impl Display for NonDivergingIntrinsic<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Assume(op) => write!(f, "assume({op:?})"),
+ Self::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
+ write!(f, "copy_nonoverlapping(dst = {dst:?}, src = {src:?}, count = {count:?})")
+ }
+ }
+ }
+}
+
+impl<'tcx> Debug for TerminatorKind<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ self.fmt_head(fmt)?;
+ let successor_count = self.successors().count();
+ let labels = self.fmt_successor_labels();
+ assert_eq!(successor_count, labels.len());
+
+ // `Cleanup` is already included in successors
+ let show_unwind = !matches!(self.unwind(), None | Some(UnwindAction::Cleanup(_)));
+ let fmt_unwind = |fmt: &mut Formatter<'_>| -> fmt::Result {
+ write!(fmt, "unwind ")?;
+ match self.unwind() {
+ // Not needed or included in successors
+ None | Some(UnwindAction::Cleanup(_)) => unreachable!(),
+ Some(UnwindAction::Continue) => write!(fmt, "continue"),
+ Some(UnwindAction::Unreachable) => write!(fmt, "unreachable"),
+ Some(UnwindAction::Terminate(reason)) => {
+ write!(fmt, "terminate({})", reason.as_short_str())
+ }
+ }
+ };
+
+ match (successor_count, show_unwind) {
+ (0, false) => Ok(()),
+ (0, true) => {
+ write!(fmt, " -> ")?;
+ fmt_unwind(fmt)
+ }
+ (1, false) => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
+ _ => {
+ write!(fmt, " -> [")?;
+ for (i, target) in self.successors().enumerate() {
+ if i > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{}: {:?}", labels[i], target)?;
+ }
+ if show_unwind {
+ write!(fmt, ", ")?;
+ fmt_unwind(fmt)?;
+ }
+ write!(fmt, "]")
+ }
+ }
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+ /// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
+ /// successor basic block, if any. The only information not included is the list of possible
+ /// successors, which may be rendered differently between the text and the graphviz format.
+ pub fn fmt_head<W: fmt::Write>(&self, fmt: &mut W) -> fmt::Result {
+ use self::TerminatorKind::*;
+ match self {
+ Goto { .. } => write!(fmt, "goto"),
+ SwitchInt { discr, .. } => write!(fmt, "switchInt({discr:?})"),
+ Return => write!(fmt, "return"),
+ GeneratorDrop => write!(fmt, "generator_drop"),
+ UnwindResume => write!(fmt, "resume"),
+ UnwindTerminate(reason) => {
+ write!(fmt, "abort({})", reason.as_short_str())
+ }
+ Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"),
+ Unreachable => write!(fmt, "unreachable"),
+ Drop { place, .. } => write!(fmt, "drop({place:?})"),
+ Call { func, args, destination, .. } => {
+ write!(fmt, "{destination:?} = ")?;
+ write!(fmt, "{func:?}(")?;
+ for (index, arg) in args.iter().enumerate() {
+ if index > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{arg:?}")?;
+ }
+ write!(fmt, ")")
+ }
+ Assert { cond, expected, msg, .. } => {
+ write!(fmt, "assert(")?;
+ if !expected {
+ write!(fmt, "!")?;
+ }
+ write!(fmt, "{cond:?}, ")?;
+ msg.fmt_assert_args(fmt)?;
+ write!(fmt, ")")
+ }
+ FalseEdge { .. } => write!(fmt, "falseEdge"),
+ FalseUnwind { .. } => write!(fmt, "falseUnwind"),
+ InlineAsm { template, ref operands, options, .. } => {
+ write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
+ for op in operands {
+ write!(fmt, ", ")?;
+ let print_late = |&late| if late { "late" } else { "" };
+ match op {
+ InlineAsmOperand::In { reg, value } => {
+ write!(fmt, "in({reg}) {value:?}")?;
+ }
+ InlineAsmOperand::Out { reg, late, place: Some(place) } => {
+ write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
+ }
+ InlineAsmOperand::Out { reg, late, place: None } => {
+ write!(fmt, "{}out({}) _", print_late(late), reg)?;
+ }
+ InlineAsmOperand::InOut {
+ reg,
+ late,
+ in_value,
+ out_place: Some(out_place),
+ } => {
+ write!(
+ fmt,
+ "in{}out({}) {:?} => {:?}",
+ print_late(late),
+ reg,
+ in_value,
+ out_place
+ )?;
+ }
+ InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
+ write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
+ }
+ InlineAsmOperand::Const { value } => {
+ write!(fmt, "const {value:?}")?;
+ }
+ InlineAsmOperand::SymFn { value } => {
+ write!(fmt, "sym_fn {value:?}")?;
+ }
+ InlineAsmOperand::SymStatic { def_id } => {
+ write!(fmt, "sym_static {def_id:?}")?;
+ }
+ }
+ }
+ write!(fmt, ", options({options:?}))")
+ }
+ }
+ }
+
+ /// Returns the list of labels for the edges to the successor basic blocks.
+ pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
+ use self::TerminatorKind::*;
+ match *self {
+ Return | UnwindResume | UnwindTerminate(_) | Unreachable | GeneratorDrop => vec![],
+ Goto { .. } => vec!["".into()],
+ SwitchInt { ref targets, .. } => targets
+ .values
+ .iter()
+ .map(|&u| Cow::Owned(u.to_string()))
+ .chain(iter::once("otherwise".into()))
+ .collect(),
+ Call { target: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ Call { target: Some(_), unwind: _, .. } => vec!["return".into()],
+ Call { target: None, unwind: UnwindAction::Cleanup(_), .. } => vec!["unwind".into()],
+ Call { target: None, unwind: _, .. } => vec![],
+ Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
+ Yield { drop: None, .. } => vec!["resume".into()],
+ Drop { unwind: UnwindAction::Cleanup(_), .. } => vec!["return".into(), "unwind".into()],
+ Drop { unwind: _, .. } => vec!["return".into()],
+ Assert { unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["success".into(), "unwind".into()]
+ }
+ Assert { unwind: _, .. } => vec!["success".into()],
+ FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
+ FalseUnwind { unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["real".into(), "unwind".into()]
+ }
+ FalseUnwind { unwind: _, .. } => vec!["real".into()],
+ InlineAsm { destination: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ InlineAsm { destination: Some(_), unwind: _, .. } => {
+ vec!["return".into()]
+ }
+ InlineAsm { destination: None, unwind: UnwindAction::Cleanup(_), .. } => {
+ vec!["unwind".into()]
+ }
+ InlineAsm { destination: None, unwind: _, .. } => vec![],
+ }
+ }
+}
+
+impl<'tcx> Debug for Rvalue<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Rvalue::*;
+
+ match *self {
+ Use(ref place) => write!(fmt, "{place:?}"),
+ Repeat(ref a, b) => {
+ write!(fmt, "[{a:?}; ")?;
+ pretty_print_const(b, fmt, false)?;
+ write!(fmt, "]")
+ }
+ Len(ref a) => write!(fmt, "Len({a:?})"),
+ Cast(ref kind, ref place, ref ty) => {
+ with_no_trimmed_paths!(write!(fmt, "{place:?} as {ty} ({kind:?})"))
+ }
+ BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{op:?}({a:?}, {b:?})"),
+ CheckedBinaryOp(ref op, box (ref a, ref b)) => {
+ write!(fmt, "Checked{op:?}({a:?}, {b:?})")
+ }
+ UnaryOp(ref op, ref a) => write!(fmt, "{op:?}({a:?})"),
+ Discriminant(ref place) => write!(fmt, "discriminant({place:?})"),
+ NullaryOp(ref op, ref t) => {
+ let t = with_no_trimmed_paths!(format!("{}", t));
+ match op {
+ NullOp::SizeOf => write!(fmt, "SizeOf({t})"),
+ NullOp::AlignOf => write!(fmt, "AlignOf({t})"),
+ NullOp::OffsetOf(fields) => write!(fmt, "OffsetOf({t}, {fields:?})"),
+ }
+ }
+ ThreadLocalRef(did) => ty::tls::with(|tcx| {
+ let muta = tcx.static_mutability(did).unwrap().prefix_str();
+ write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
+ }),
+ Ref(region, borrow_kind, ref place) => {
+ let kind_str = match borrow_kind {
+ BorrowKind::Shared => "",
+ BorrowKind::Fake => "fake ",
+ BorrowKind::Mut { .. } => "mut ",
+ };
+
+ // When printing regions, add trailing space if necessary.
+ let print_region = ty::tls::with(|tcx| {
+ tcx.sess.verbose() || tcx.sess.opts.unstable_opts.identify_regions
+ });
+ let region = if print_region {
+ let mut region = region.to_string();
+ if !region.is_empty() {
+ region.push(' ');
+ }
+ region
+ } else {
+ // Do not even print 'static
+ String::new()
+ };
+ write!(fmt, "&{region}{kind_str}{place:?}")
+ }
+
+ CopyForDeref(ref place) => write!(fmt, "deref_copy {place:#?}"),
+
+ AddressOf(mutability, ref place) => {
+ let kind_str = match mutability {
+ Mutability::Mut => "mut",
+ Mutability::Not => "const",
+ };
+
+ write!(fmt, "&raw {kind_str} {place:?}")
+ }
+
+ Aggregate(ref kind, ref places) => {
+ let fmt_tuple = |fmt: &mut Formatter<'_>, name: &str| {
+ let mut tuple_fmt = fmt.debug_tuple(name);
+ for place in places {
+ tuple_fmt.field(place);
+ }
+ tuple_fmt.finish()
+ };
+
+ match **kind {
+ AggregateKind::Array(_) => write!(fmt, "{places:?}"),
+
+ AggregateKind::Tuple => {
+ if places.is_empty() {
+ write!(fmt, "()")
+ } else {
+ fmt_tuple(fmt, "")
+ }
+ }
+
+ AggregateKind::Adt(adt_did, variant, args, _user_ty, _) => {
+ ty::tls::with(|tcx| {
+ let variant_def = &tcx.adt_def(adt_did).variant(variant);
+ let args = tcx.lift(args).expect("could not lift for printing");
+ let name = FmtPrinter::new(tcx, Namespace::ValueNS)
+ .print_def_path(variant_def.def_id, args)?
+ .into_buffer();
+
+ match variant_def.ctor_kind() {
+ Some(CtorKind::Const) => fmt.write_str(&name),
+ Some(CtorKind::Fn) => fmt_tuple(fmt, &name),
+ None => {
+ let mut struct_fmt = fmt.debug_struct(&name);
+ for (field, place) in iter::zip(&variant_def.fields, places) {
+ struct_fmt.field(field.name.as_str(), place);
+ }
+ struct_fmt.finish()
+ }
+ }
+ })
+ }
+
+ AggregateKind::Closure(def_id, args) => ty::tls::with(|tcx| {
+ let name = if tcx.sess.opts.unstable_opts.span_free_formats {
+ let args = tcx.lift(args).unwrap();
+ format!("{{closure@{}}}", tcx.def_path_str_with_args(def_id, args),)
+ } else {
+ let span = tcx.def_span(def_id);
+ format!(
+ "{{closure@{}}}",
+ tcx.sess.source_map().span_to_diagnostic_string(span)
+ )
+ };
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(def_id) = def_id.as_local()
+ && let Some(upvars) = tcx.upvars_mentioned(def_id)
+ {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ } else {
+ for (index, place) in places.iter().enumerate() {
+ struct_fmt.field(&format!("{index}"), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+
+ AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| {
+ let name = format!("{{generator@{:?}}}", tcx.def_span(def_id));
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(def_id) = def_id.as_local()
+ && let Some(upvars) = tcx.upvars_mentioned(def_id)
+ {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ } else {
+ for (index, place) in places.iter().enumerate() {
+ struct_fmt.field(&format!("{index}"), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+ }
+ }
+
+ ShallowInitBox(ref place, ref ty) => {
+ with_no_trimmed_paths!(write!(fmt, "ShallowInitBox({place:?}, {ty})"))
+ }
+ }
+ }
+}
+
+impl<'tcx> Debug for Operand<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Operand::*;
+ match *self {
+ Constant(ref a) => write!(fmt, "{a:?}"),
+ Copy(ref place) => write!(fmt, "{place:?}"),
+ Move(ref place) => write!(fmt, "move {place:?}"),
+ }
+ }
+}
+
+impl<'tcx> Debug for ConstOperand<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{self}")
+ }
+}
+
+impl<'tcx> Display for ConstOperand<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match self.ty().kind() {
+ ty::FnDef(..) => {}
+ _ => write!(fmt, "const ")?,
+ }
+ Display::fmt(&self.const_, fmt)
+ }
+}
+
+impl Debug for Place<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ self.as_ref().fmt(fmt)
+ }
+}
+
+impl Debug for PlaceRef<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ pre_fmt_projection(self.projection, fmt)?;
+ write!(fmt, "{:?}", self.local)?;
+ post_fmt_projection(self.projection, fmt)
+ }
+}
+
+fn pre_fmt_projection(projection: &[PlaceElem<'_>], fmt: &mut Formatter<'_>) -> fmt::Result {
+ for &elem in projection.iter().rev() {
+ match elem {
+ ProjectionElem::OpaqueCast(_)
+ | ProjectionElem::Subtype(_)
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Field(_, _) => {
+ write!(fmt, "(").unwrap();
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, "(*").unwrap();
+ }
+ ProjectionElem::Index(_)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {}
+ }
+ }
+
+ Ok(())
+}
+
+fn post_fmt_projection(projection: &[PlaceElem<'_>], fmt: &mut Formatter<'_>) -> fmt::Result {
+ for &elem in projection.iter() {
+ match elem {
+ ProjectionElem::OpaqueCast(ty) => {
+ write!(fmt, " as {ty})")?;
+ }
+ ProjectionElem::Subtype(ty) => {
+ write!(fmt, " as subtype {ty})")?;
+ }
+ ProjectionElem::Downcast(Some(name), _index) => {
+ write!(fmt, " as {name})")?;
+ }
+ ProjectionElem::Downcast(None, index) => {
+ write!(fmt, " as variant#{index:?})")?;
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, ")")?;
+ }
+ ProjectionElem::Field(field, ty) => {
+ with_no_trimmed_paths!(write!(fmt, ".{:?}: {})", field.index(), ty)?);
+ }
+ ProjectionElem::Index(ref index) => {
+ write!(fmt, "[{index:?}]")?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
+ write!(fmt, "[{offset:?} of {min_length:?}]")?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
+ write!(fmt, "[-{offset:?} of {min_length:?}]")?;
+ }
+ ProjectionElem::Subslice { from, to: 0, from_end: true } => {
+ write!(fmt, "[{from:?}:]")?;
+ }
+ ProjectionElem::Subslice { from: 0, to, from_end: true } => {
+ write!(fmt, "[:-{to:?}]")?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } => {
+ write!(fmt, "[{from:?}:-{to:?}]")?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: false } => {
+ write!(fmt, "[{from:?}..{to:?}]")?;
+ }
+ }
+ }
+
+ Ok(())
+}
+
/// After we print the main statement, we sometimes dump extra
/// information. There's often a lot of little things "nuzzled up" in
/// a statement.
-fn write_extra<'tcx, F>(tcx: TyCtxt<'tcx>, write: &mut dyn Write, mut visit_op: F) -> io::Result<()>
+fn write_extra<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ write: &mut dyn io::Write,
+ mut visit_op: F,
+) -> io::Result<()>
where
F: FnMut(&mut ExtraComments<'tcx>),
{
@@ -443,10 +1215,10 @@ fn use_verbose(ty: Ty<'_>, fn_def: bool) -> bool {
}
impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
- fn visit_constant(&mut self, constant: &Constant<'tcx>, _location: Location) {
- let Constant { span, user_ty, literal } = constant;
- if use_verbose(literal.ty(), true) {
- self.push("mir::Constant");
+ fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _location: Location) {
+ let ConstOperand { span, user_ty, const_ } = constant;
+ if use_verbose(const_.ty(), true) {
+ self.push("mir::ConstOperand");
self.push(&format!(
"+ span: {}",
self.tcx.sess.source_map().span_to_embeddable_string(*span)
@@ -455,34 +1227,35 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
self.push(&format!("+ user_ty: {user_ty:?}"));
}
- // FIXME: this is a poor version of `pretty_print_const_value`.
- let fmt_val = |val: &ConstValue<'tcx>| match val {
- ConstValue::ZeroSized => "<ZST>".to_string(),
- ConstValue::Scalar(s) => format!("Scalar({s:?})"),
- ConstValue::Slice { .. } => "Slice(..)".to_string(),
- ConstValue::ByRef { .. } => "ByRef(..)".to_string(),
+ let fmt_val = |val: ConstValue<'tcx>, ty: Ty<'tcx>| {
+ let tcx = self.tcx;
+ rustc_data_structures::make_display(move |fmt| {
+ pretty_print_const_value_tcx(tcx, val, ty, fmt)
+ })
};
+ // FIXME: call pretty_print_const_valtree?
let fmt_valtree = |valtree: &ty::ValTree<'tcx>| match valtree {
- ty::ValTree::Leaf(leaf) => format!("ValTree::Leaf({leaf:?})"),
- ty::ValTree::Branch(_) => "ValTree::Branch(..)".to_string(),
+ ty::ValTree::Leaf(leaf) => format!("Leaf({leaf:?})"),
+ ty::ValTree::Branch(_) => "Branch(..)".to_string(),
};
- let val = match literal {
- ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Param(p) => format!("Param({p})"),
+ let val = match const_ {
+ Const::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Param(p) => format!("ty::Param({p})"),
ty::ConstKind::Unevaluated(uv) => {
- format!("Unevaluated({}, {:?})", self.tcx.def_path_str(uv.def), uv.args,)
+ format!("ty::Unevaluated({}, {:?})", self.tcx.def_path_str(uv.def), uv.args,)
}
- ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
+ ty::ConstKind::Value(val) => format!("ty::Valtree({})", fmt_valtree(&val)),
+ // No `ty::` prefix since we also use this to represent errors from `mir::Unevaluated`.
ty::ConstKind::Error(_) => "Error".to_string(),
// These variants shouldn't exist in the MIR.
ty::ConstKind::Placeholder(_)
| ty::ConstKind::Infer(_)
| ty::ConstKind::Expr(_)
- | ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
+ | ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", const_),
},
- ConstantKind::Unevaluated(uv, _) => {
+ Const::Unevaluated(uv, _) => {
format!(
"Unevaluated({}, {:?}, {:?})",
self.tcx.def_path_str(uv.def),
@@ -490,16 +1263,13 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
uv.promoted,
)
}
- // To keep the diffs small, we render this like we render `ty::Const::Value`.
- //
- // This changes once `ty::Const::Value` is represented using valtrees.
- ConstantKind::Val(val, _) => format!("Value({})", fmt_val(&val)),
+ Const::Val(val, ty) => format!("Value({})", fmt_val(*val, *ty)),
};
// This reflects what `Const` looked like before `val` was renamed
// as `kind`. We print it like this to avoid having to update
// expected output in a lot of tests.
- self.push(&format!("+ literal: Const {{ ty: {}, val: {} }}", literal.ty(), val));
+ self.push(&format!("+ const_: Const {{ ty: {}, val: {} }}", const_.ty(), val));
}
}
@@ -536,162 +1306,15 @@ fn comment(tcx: TyCtxt<'_>, SourceInfo { span, scope }: SourceInfo) -> String {
format!("scope {} at {}", scope.index(), location,)
}
-/// Prints local variables in a scope tree.
-fn write_scope_tree(
- tcx: TyCtxt<'_>,
- body: &Body<'_>,
- scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
- w: &mut dyn Write,
- parent: SourceScope,
- depth: usize,
-) -> io::Result<()> {
- let indent = depth * INDENT.len();
-
- // Local variable debuginfo.
- for var_debug_info in &body.var_debug_info {
- if var_debug_info.source_info.scope != parent {
- // Not declared in this scope.
- continue;
- }
-
- let indented_debug_info = format!(
- "{0:1$}debug {2} => {3:?};",
- INDENT, indent, var_debug_info.name, var_debug_info.value,
- );
-
- if tcx.sess.opts.unstable_opts.mir_include_spans {
- writeln!(
- w,
- "{0:1$} // in {2}",
- indented_debug_info,
- ALIGN,
- comment(tcx, var_debug_info.source_info),
- )?;
- } else {
- writeln!(w, "{indented_debug_info}")?;
- }
- }
-
- // Local variable types.
- for (local, local_decl) in body.local_decls.iter_enumerated() {
- if (1..body.arg_count + 1).contains(&local.index()) {
- // Skip over argument locals, they're printed in the signature.
- continue;
- }
-
- if local_decl.source_info.scope != parent {
- // Not declared in this scope.
- continue;
- }
-
- let mut_str = local_decl.mutability.prefix_str();
-
- let mut indented_decl =
- format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
- if let Some(user_ty) = &local_decl.user_ty {
- for user_ty in user_ty.projections() {
- write!(indented_decl, " as {user_ty:?}").unwrap();
- }
- }
- indented_decl.push(';');
-
- let local_name = if local == RETURN_PLACE { " return place" } else { "" };
-
- if tcx.sess.opts.unstable_opts.mir_include_spans {
- writeln!(
- w,
- "{0:1$} //{2} in {3}",
- indented_decl,
- ALIGN,
- local_name,
- comment(tcx, local_decl.source_info),
- )?;
- } else {
- writeln!(w, "{indented_decl}",)?;
- }
- }
-
- let Some(children) = scope_tree.get(&parent) else {
- return Ok(());
- };
-
- for &child in children {
- let child_data = &body.source_scopes[child];
- assert_eq!(child_data.parent_scope, Some(parent));
-
- let (special, span) = if let Some((callee, callsite_span)) = child_data.inlined {
- (
- format!(
- " (inlined {}{})",
- if callee.def.requires_caller_location(tcx) { "#[track_caller] " } else { "" },
- callee
- ),
- Some(callsite_span),
- )
- } else {
- (String::new(), None)
- };
-
- let indented_header = format!("{0:1$}scope {2}{3} {{", "", indent, child.index(), special);
-
- if tcx.sess.opts.unstable_opts.mir_include_spans {
- if let Some(span) = span {
- writeln!(
- w,
- "{0:1$} // at {2}",
- indented_header,
- ALIGN,
- tcx.sess.source_map().span_to_embeddable_string(span),
- )?;
- } else {
- writeln!(w, "{indented_header}")?;
- }
- } else {
- writeln!(w, "{indented_header}")?;
- }
-
- write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
- writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
- }
-
- Ok(())
-}
-
-/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
-/// local variables (both user-defined bindings and compiler temporaries).
-pub fn write_mir_intro<'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &Body<'_>,
- w: &mut dyn Write,
-) -> io::Result<()> {
- write_mir_sig(tcx, body, w)?;
- writeln!(w, "{{")?;
-
- // construct a scope tree and write it out
- let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
- for (index, scope_data) in body.source_scopes.iter().enumerate() {
- if let Some(parent) = scope_data.parent_scope {
- scope_tree.entry(parent).or_default().push(SourceScope::new(index));
- } else {
- // Only the argument scope has no parent, because it's the root.
- assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
- }
- }
-
- write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
-
- // Add an empty line before the first block is printed.
- writeln!(w)?;
-
- Ok(())
-}
+///////////////////////////////////////////////////////////////////////////
+// Allocations
/// Find all `AllocId`s mentioned (recursively) in the MIR body and print their corresponding
/// allocations.
pub fn write_allocations<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'_>,
- w: &mut dyn Write,
+ w: &mut dyn io::Write,
) -> io::Result<()> {
fn alloc_ids_from_alloc(
alloc: ConstAllocation<'_>,
@@ -702,24 +1325,28 @@ pub fn write_allocations<'tcx>(
fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
match val {
ConstValue::Scalar(interpret::Scalar::Ptr(ptr, _)) => {
- Either::Left(Either::Left(std::iter::once(ptr.provenance)))
+ Either::Left(std::iter::once(ptr.provenance))
}
- ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
- Either::Left(Either::Right(std::iter::empty()))
+ ConstValue::Scalar(interpret::Scalar::Int { .. }) => Either::Right(std::iter::empty()),
+ ConstValue::ZeroSized => Either::Right(std::iter::empty()),
+ ConstValue::Slice { .. } => {
+ // `u8`/`str` slices, shouldn't contain pointers that we want to print.
+ Either::Right(std::iter::empty())
}
- ConstValue::ZeroSized => Either::Left(Either::Right(std::iter::empty())),
- ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
- Either::Right(alloc_ids_from_alloc(alloc))
+ ConstValue::Indirect { alloc_id, .. } => {
+ // FIXME: we don't actually want to print all of these, since some are printed nicely directly as values inline in MIR.
+ // Really we'd want `pretty_print_const_value` to decide which allocations to print, instead of having a separate visitor.
+ Either::Left(std::iter::once(alloc_id))
}
}
}
struct CollectAllocIds(BTreeSet<AllocId>);
impl<'tcx> Visitor<'tcx> for CollectAllocIds {
- fn visit_constant(&mut self, c: &Constant<'tcx>, _: Location) {
- match c.literal {
- ConstantKind::Ty(_) | ConstantKind::Unevaluated(..) => {}
- ConstantKind::Val(val, _) => {
+ fn visit_constant(&mut self, c: &ConstOperand<'tcx>, _: Location) {
+ match c.const_ {
+ Const::Ty(_) | Const::Unevaluated(..) => {}
+ Const::Val(val, _) => {
self.0.extend(alloc_ids_from_const_val(val));
}
}
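
The flattened `Either` arms above work because `either::Either` is itself an `Iterator` whenever both sides are. A minimal standalone illustration, assuming only the `either` crate; the `Val` enum is a made-up stand-in for `ConstValue`:

use either::Either; // assumed dependency: either = "1"

#[derive(Debug)]
enum Val {
    Ptr(u64),  // stands in for ConstValue::Scalar(Scalar::Ptr(..)) / ConstValue::Indirect
    Int(u128), // stands in for Scalar::Int, ZeroSized, Slice: nothing to collect
}

// Both match arms must produce the same iterator type; `Either` lets one arm yield a
// single id and the other yield nothing, without the nested `Either<Either<..>, ..>`
// that the previous version needed.
fn alloc_ids(val: &Val) -> impl Iterator<Item = u64> {
    match val {
        Val::Ptr(id) => Either::Left(std::iter::once(*id)),
        Val::Int(_) => Either::Right(std::iter::empty()),
    }
}

fn main() {
    assert_eq!(alloc_ids(&Val::Ptr(7)).collect::<Vec<_>>(), vec![7]);
    assert_eq!(alloc_ids(&Val::Int(0)).count(), 0);
}
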
@@ -736,7 +1363,7 @@ pub fn write_allocations<'tcx>(
let mut todo: Vec<_> = seen.iter().copied().collect();
while let Some(id) = todo.pop() {
let mut write_allocation_track_relocs =
- |w: &mut dyn Write, alloc: ConstAllocation<'tcx>| -> io::Result<()> {
+ |w: &mut dyn io::Write, alloc: ConstAllocation<'tcx>| -> io::Result<()> {
// `.rev()` because we are popping them from the back of the `todo` vector.
for id in alloc_ids_from_alloc(alloc).rev() {
if seen.insert(id) {
@@ -997,91 +1624,173 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
Ok(())
}
-fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
- use rustc_hir::def::DefKind;
-
- trace!("write_mir_sig: {:?}", body.source.instance);
- let def_id = body.source.def_id();
- let kind = tcx.def_kind(def_id);
- let is_function = match kind {
- DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
- _ => tcx.is_closure(def_id),
- };
- match (kind, body.source.promoted) {
- (_, Some(i)) => write!(w, "{i:?} in ")?,
- (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
- (DefKind::Static(hir::Mutability::Not), _) => write!(w, "static ")?,
- (DefKind::Static(hir::Mutability::Mut), _) => write!(w, "static mut ")?,
- (_, _) if is_function => write!(w, "fn ")?,
- (DefKind::AnonConst | DefKind::InlineConst, _) => {} // things like anon const, not an item
- _ => bug!("Unexpected def kind {:?}", kind),
- }
-
- ty::print::with_forced_impl_filename_line! {
- // see notes on #41697 elsewhere
- write!(w, "{}", tcx.def_path_str(def_id))?
- }
+///////////////////////////////////////////////////////////////////////////
+// Constants
- if body.source.promoted.is_none() && is_function {
- write!(w, "(")?;
+fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
+ write!(fmt, "b\"{}\"", byte_str.escape_ascii())
+}
- // fn argument types.
- for (i, arg) in body.args_iter().enumerate() {
- if i != 0 {
- write!(w, ", ")?;
- }
- write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
+fn comma_sep<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fmt: &mut Formatter<'_>,
+ elems: Vec<(ConstValue<'tcx>, Ty<'tcx>)>,
+) -> fmt::Result {
+ let mut first = true;
+ for (ct, ty) in elems {
+ if !first {
+ fmt.write_str(", ")?;
}
-
- write!(w, ") -> {}", body.return_ty())?;
- } else {
- assert_eq!(body.arg_count, 0);
- write!(w, ": {} =", body.return_ty())?;
+ pretty_print_const_value_tcx(tcx, ct, ty, fmt)?;
+ first = false;
}
-
- if let Some(yield_ty) = body.yield_ty() {
- writeln!(w)?;
- writeln!(w, "yields {yield_ty}")?;
- }
-
- write!(w, " ")?;
- // Next thing that gets printed is the opening {
-
Ok(())
}
-fn write_user_type_annotations(
- tcx: TyCtxt<'_>,
- body: &Body<'_>,
- w: &mut dyn Write,
-) -> io::Result<()> {
- if !body.user_type_annotations.is_empty() {
- writeln!(w, "| User Type Annotations")?;
- }
- for (index, annotation) in body.user_type_annotations.iter_enumerated() {
- writeln!(
- w,
- "| {:?}: user_ty: {:?}, span: {}, inferred_ty: {:?}",
- index.index(),
- annotation.user_ty,
- tcx.sess.source_map().span_to_embeddable_string(annotation.span),
- annotation.inferred_ty,
- )?;
+fn pretty_print_const_value_tcx<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ct: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ fmt: &mut Formatter<'_>,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+
+ if tcx.sess.verbose() {
+ fmt.write_str(&format!("ConstValue({ct:?}: {ty})"))?;
+ return Ok(());
}
- if !body.user_type_annotations.is_empty() {
- writeln!(w, "|")?;
+
+ let u8_type = tcx.types.u8;
+ match (ct, ty.kind()) {
+ // Byte/string slices, printed as (byte) string literals.
+ (_, ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Str) => {
+ if let Some(data) = ct.try_get_slice_bytes_for_diagnostics(tcx) {
+ fmt.write_str(&format!("{:?}", String::from_utf8_lossy(data)))?;
+ return Ok(());
+ }
+ }
+ (_, ty::Ref(_, inner_ty, _)) if matches!(inner_ty.kind(), ty::Slice(t) if *t == u8_type) => {
+ if let Some(data) = ct.try_get_slice_bytes_for_diagnostics(tcx) {
+ pretty_print_byte_str(fmt, data)?;
+ return Ok(());
+ }
+ }
+ (ConstValue::Indirect { alloc_id, offset }, ty::Array(t, n)) if *t == u8_type => {
+ let n = n.try_to_target_usize(tcx).unwrap();
+ let alloc = tcx.global_alloc(alloc_id).unwrap_memory();
+ // cast is ok because we already checked for pointer size (32 or 64 bit) above
+ let range = AllocRange { start: offset, size: Size::from_bytes(n) };
+ let byte_str = alloc.inner().get_bytes_strip_provenance(&tcx, range).unwrap();
+ fmt.write_str("*")?;
+ pretty_print_byte_str(fmt, byte_str)?;
+ return Ok(());
+ }
+ // Aggregates, printed as array/tuple/struct/variant construction syntax.
+ //
+ // NB: the `has_non_region_param` check ensures that we can use
+ // the `destructure_const` query with an empty `ty::ParamEnv` without
+ // introducing ICEs (e.g. via `layout_of`) from missing bounds.
+ // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
+ // to be able to destructure the tuple into `(0u8, *mut T)`
+ (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_non_region_param() => {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+ if let Some(contents) = tcx.try_destructure_mir_constant_for_diagnostics(ct, ty) {
+ let fields: Vec<(ConstValue<'_>, Ty<'_>)> = contents.fields.to_vec();
+ match *ty.kind() {
+ ty::Array(..) => {
+ fmt.write_str("[")?;
+ comma_sep(tcx, fmt, fields)?;
+ fmt.write_str("]")?;
+ }
+ ty::Tuple(..) => {
+ fmt.write_str("(")?;
+ comma_sep(tcx, fmt, fields)?;
+ if contents.fields.len() == 1 {
+ fmt.write_str(",")?;
+ }
+ fmt.write_str(")")?;
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ fmt.write_str(&format!("{{unreachable(): {ty}}}"))?;
+ }
+ ty::Adt(def, args) => {
+ let variant_idx = contents
+ .variant
+ .expect("destructed mir constant of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+ let args = tcx.lift(args).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(variant_def.def_id, args)?;
+ fmt.write_str(&cx.into_buffer())?;
+
+ match variant_def.ctor_kind() {
+ Some(CtorKind::Const) => {}
+ Some(CtorKind::Fn) => {
+ fmt.write_str("(")?;
+ comma_sep(tcx, fmt, fields)?;
+ fmt.write_str(")")?;
+ }
+ None => {
+ fmt.write_str(" {{ ")?;
+ let mut first = true;
+ for (field_def, (ct, ty)) in iter::zip(&variant_def.fields, fields)
+ {
+ if !first {
+ fmt.write_str(", ")?;
+ }
+ write!(fmt, "{}: ", field_def.name)?;
+ pretty_print_const_value_tcx(tcx, ct, ty, fmt)?;
+ first = false;
+ }
+ fmt.write_str(" }}")?;
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ return Ok(());
+ }
+ }
+ (ConstValue::Scalar(scalar), _) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let ty = tcx.lift(ty).unwrap();
+ cx = cx.pretty_print_const_scalar(scalar, ty)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ (ConstValue::ZeroSized, ty::FnDef(d, s)) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(*d, s)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+ // their fields instead of just dumping the memory.
+ _ => {}
}
- Ok(())
+ // Fall back to debug pretty printing for invalid constants.
+ write!(fmt, "{ct:?}: {ty}")
}
-pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
- if let Some(i) = single {
- vec![i]
- } else {
- tcx.mir_keys(()).iter().map(|def_id| def_id.to_def_id()).collect()
- }
+pub(crate) fn pretty_print_const_value<'tcx>(
+ ct: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ fmt: &mut Formatter<'_>,
+) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+ pretty_print_const_value_tcx(tcx, ct, ty, fmt)
+ })
}
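
The aggregate branch of `pretty_print_const_value_tcx` destructures the constant and recurses through `comma_sep`, special-casing the one-element tuple's trailing comma. A self-contained toy model of that recursion (no rustc types involved):

use std::fmt::{self, Display, Formatter};

// Toy stand-in for the aggregate branch: values are destructured into fields and
// printed recursively, with a one-element tuple getting a trailing comma so `(v,)`
// round-trips as Rust syntax.
enum ToyVal {
    Int(i64),
    Array(Vec<ToyVal>),
    Tuple(Vec<ToyVal>),
}

fn comma_sep(fmt: &mut Formatter<'_>, elems: &[ToyVal]) -> fmt::Result {
    for (i, e) in elems.iter().enumerate() {
        if i != 0 {
            fmt.write_str(", ")?;
        }
        pretty(fmt, e)?;
    }
    Ok(())
}

fn pretty(fmt: &mut Formatter<'_>, v: &ToyVal) -> fmt::Result {
    match v {
        ToyVal::Int(i) => write!(fmt, "{i}"),
        ToyVal::Array(fields) => {
            fmt.write_str("[")?;
            comma_sep(fmt, fields)?;
            fmt.write_str("]")
        }
        ToyVal::Tuple(fields) => {
            fmt.write_str("(")?;
            comma_sep(fmt, fields)?;
            if fields.len() == 1 {
                fmt.write_str(",")?;
            }
            fmt.write_str(")")
        }
    }
}

struct Pretty(ToyVal);
impl Display for Pretty {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
        pretty(fmt, &self.0)
    }
}

fn main() {
    let v = ToyVal::Tuple(vec![ToyVal::Array(vec![ToyVal::Int(1), ToyVal::Int(2)])]);
    assert_eq!(Pretty(v).to_string(), "([1, 2],)");
}
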
+///////////////////////////////////////////////////////////////////////////
+// Miscellaneous
+
/// Calculates the hex representation of the given `u64` and returns its length in chars
///
/// ```ignore (cannot-test-private-function)
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index 71bec49af..c74a9536b 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -1,6 +1,5 @@
//! Values computed by queries that use MIR.
-use crate::mir::interpret::ConstValue;
use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::unord::UnordSet;
@@ -16,7 +15,7 @@ use smallvec::SmallVec;
use std::cell::Cell;
use std::fmt::{self, Debug};
-use super::SourceInfo;
+use super::{ConstValue, SourceInfo};
#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
pub enum UnsafetyViolationKind {
@@ -334,7 +333,7 @@ rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16);
///
/// See also `rustc_const_eval::borrow_check::constraints`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
-#[derive(TyEncodable, TyDecodable, HashStable, Lift, TypeVisitable, TypeFoldable)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)]
pub enum ConstraintCategory<'tcx> {
Return(ReturnConstraint),
Yield,
@@ -415,8 +414,7 @@ impl<'tcx> ClosureOutlivesSubjectTy<'tcx> {
pub fn bind(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Self {
let inner = tcx.fold_regions(ty, |r, depth| match r.kind() {
ty::ReVar(vid) => {
- let br =
- ty::BoundRegion { var: ty::BoundVar::new(vid.index()), kind: ty::BrAnon(None) };
+ let br = ty::BoundRegion { var: ty::BoundVar::new(vid.index()), kind: ty::BrAnon };
ty::Region::new_late_bound(tcx, depth, br)
}
_ => bug!("unexpected region in ClosureOutlivesSubjectTy: {r:?}"),
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
index 20a9e6889..a5358687c 100644
--- a/compiler/rustc_middle/src/mir/spanview.rs
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -238,45 +238,6 @@ pub fn source_range_no_file(tcx: TyCtxt<'_>, span: Span) -> String {
format!("{}:{}-{}:{}", start.line, start.col.to_usize() + 1, end.line, end.col.to_usize() + 1)
}
-pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
- use StatementKind::*;
- match statement.kind {
- Assign(..) => "Assign",
- FakeRead(..) => "FakeRead",
- SetDiscriminant { .. } => "SetDiscriminant",
- Deinit(..) => "Deinit",
- StorageLive(..) => "StorageLive",
- StorageDead(..) => "StorageDead",
- Retag(..) => "Retag",
- PlaceMention(..) => "PlaceMention",
- AscribeUserType(..) => "AscribeUserType",
- Coverage(..) => "Coverage",
- Intrinsic(..) => "Intrinsic",
- ConstEvalCounter => "ConstEvalCounter",
- Nop => "Nop",
- }
-}
-
-pub fn terminator_kind_name(term: &Terminator<'_>) -> &'static str {
- use TerminatorKind::*;
- match term.kind {
- Goto { .. } => "Goto",
- SwitchInt { .. } => "SwitchInt",
- Resume => "Resume",
- Terminate => "Terminate",
- Return => "Return",
- Unreachable => "Unreachable",
- Drop { .. } => "Drop",
- Call { .. } => "Call",
- Assert { .. } => "Assert",
- Yield { .. } => "Yield",
- GeneratorDrop => "GeneratorDrop",
- FalseEdge { .. } => "FalseEdge",
- FalseUnwind { .. } => "FalseUnwind",
- InlineAsm { .. } => "InlineAsm",
- }
-}
-
fn statement_span_viewable<'tcx>(
tcx: TyCtxt<'tcx>,
body_span: Span,
@@ -304,7 +265,7 @@ fn terminator_span_viewable<'tcx>(
if !body_span.contains(span) {
return None;
}
- let id = format!("{}:{}", bb.index(), terminator_kind_name(term));
+ let id = format!("{}:{}", bb.index(), term.kind.name());
let tooltip = tooltip(tcx, &id, span, vec![], &data.terminator);
Some(SpanViewable { bb, span, id, tooltip })
}
@@ -631,7 +592,7 @@ fn tooltip<'tcx>(
"\n{}{}: {}: {:?}",
TOOLTIP_INDENT,
source_range,
- statement_kind_name(&statement),
+ statement.kind.name(),
statement
));
}
@@ -641,7 +602,7 @@ fn tooltip<'tcx>(
"\n{}{}: {}: {:?}",
TOOLTIP_INDENT,
source_range,
- terminator_kind_name(term),
+ term.kind.name(),
term.kind
));
}
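
The deleted free functions are superseded by `name` methods on the kind enums themselves (`StatementKind::name` is added in `syntax.rs` below, `TerminatorKind::name` already exists). A minimal sketch of the pattern on a toy enum, showing why it can stay a `const fn` returning `&'static str`:

// Minimal version of the `kind.name()` pattern that replaces the free
// `statement_kind_name`/`terminator_kind_name` helpers: the name lives on the enum.
enum ToyKind {
    Assign,
    Nop,
}

impl ToyKind {
    const fn name(&self) -> &'static str {
        match self {
            ToyKind::Assign => "Assign",
            ToyKind::Nop => "Nop",
        }
    }
}

fn main() {
    assert_eq!(ToyKind::Assign.name(), "Assign");
    assert_eq!(ToyKind::Nop.name(), "Nop");
}
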
diff --git a/compiler/rustc_middle/src/mir/statement.rs b/compiler/rustc_middle/src/mir/statement.rs
new file mode 100644
index 000000000..3471d620e
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/statement.rs
@@ -0,0 +1,464 @@
+/// Functionality for statements, operands, places, and things that appear in them.
+use super::{interpret::GlobalAlloc, *};
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// A statement in a basic block, including information about its source code.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Statement<'tcx> {
+ pub source_info: SourceInfo,
+ pub kind: StatementKind<'tcx>,
+}
+
+impl Statement<'_> {
+ /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
+ /// invalidating statement indices in `Location`s.
+ pub fn make_nop(&mut self) {
+ self.kind = StatementKind::Nop
+ }
+
+ /// Changes a statement to a nop and returns the original statement.
+ #[must_use = "If you don't need the statement, use `make_nop` instead"]
+ pub fn replace_nop(&mut self) -> Self {
+ Statement {
+ source_info: self.source_info,
+ kind: mem::replace(&mut self.kind, StatementKind::Nop),
+ }
+ }
+}
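
`replace_nop` leans on `std::mem::replace` so the statement keeps its slot in the block (and any stored `Location` index stays valid) while the old kind is handed back. A standalone sketch with toy types:

use std::mem;

#[derive(Debug, PartialEq)]
enum ToyKind {
    Assign(&'static str),
    Nop,
}

struct ToyStatement {
    kind: ToyKind,
}

impl ToyStatement {
    // Mirrors `Statement::replace_nop`: swap a Nop into place and return what was there,
    // instead of removing the element and shifting its neighbours.
    fn replace_nop(&mut self) -> ToyKind {
        mem::replace(&mut self.kind, ToyKind::Nop)
    }
}

fn main() {
    let mut block = vec![
        ToyStatement { kind: ToyKind::Assign("_1 = const 5_i32") },
        ToyStatement { kind: ToyKind::Assign("_2 = move _1") },
    ];
    let old = block[0].replace_nop();
    assert_eq!(old, ToyKind::Assign("_1 = const 5_i32"));
    // The neighbouring statement keeps its index, so stored indices stay valid.
    assert_eq!(block[1].kind, ToyKind::Assign("_2 = move _1"));
    assert_eq!(block[0].kind, ToyKind::Nop);
}
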
+
+impl<'tcx> StatementKind<'tcx> {
+ pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+
+ pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+impl<V, T> ProjectionElem<V, T> {
+ /// Returns `true` if the target of this projection may refer to a different region of memory
+ /// than the base.
+ fn is_indirect(&self) -> bool {
+ match self {
+ Self::Deref => true,
+
+ Self::Field(_, _)
+ | Self::Index(_)
+ | Self::OpaqueCast(_)
+ | Self::Subtype(_)
+ | Self::ConstantIndex { .. }
+ | Self::Subslice { .. }
+ | Self::Downcast(_, _) => false,
+ }
+ }
+
+ /// Returns `true` if the target of this projection always refers to the same memory region
+ /// whatever the state of the program.
+ pub fn is_stable_offset(&self) -> bool {
+ match self {
+ Self::Deref | Self::Index(_) => false,
+ Self::Field(_, _)
+ | Self::OpaqueCast(_)
+ | Self::Subtype(_)
+ | Self::ConstantIndex { .. }
+ | Self::Subslice { .. }
+ | Self::Downcast(_, _) => true,
+ }
+ }
+
+ /// Returns `true` if this is a `Downcast` projection with the given `VariantIdx`.
+ pub fn is_downcast_to(&self, v: VariantIdx) -> bool {
+ matches!(*self, Self::Downcast(_, x) if x == v)
+ }
+
+ /// Returns `true` if this is a `Field` projection with the given index.
+ pub fn is_field_to(&self, f: FieldIdx) -> bool {
+ matches!(*self, Self::Field(x, _) if x == f)
+ }
+
+ /// Returns `true` if this is accepted inside `VarDebugInfoContents::Place`.
+ pub fn can_use_in_debuginfo(&self) -> bool {
+ match self {
+ Self::ConstantIndex { from_end: false, .. }
+ | Self::Deref
+ | Self::Downcast(_, _)
+ | Self::Field(_, _) => true,
+ Self::ConstantIndex { from_end: true, .. }
+ | Self::Index(_)
+ | Self::Subtype(_)
+ | Self::OpaqueCast(_)
+ | Self::Subslice { .. } => false,
+ }
+ }
+}
+
+/// Alias for projections as they appear in `UserTypeProjection`, where we
+/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
+pub type ProjectionKind = ProjectionElem<(), ()>;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct PlaceRef<'tcx> {
+ pub local: Local,
+ pub projection: &'tcx [PlaceElem<'tcx>],
+}
+
+// Once we stop implementing `Ord` for `DefId`,
+// this impl will be unnecessary. Until then, we'll
+// leave this impl in place to prevent re-adding a
+// dependency on the `Ord` impl for `DefId`
+impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
+
+impl<'tcx> Place<'tcx> {
+ // FIXME change this to a const fn by also making List::empty a const fn.
+ pub fn return_place() -> Place<'tcx> {
+ Place { local: RETURN_PLACE, projection: List::empty() }
+ }
+
+ /// Returns `true` if this `Place` contains a `Deref` projection.
+ ///
+ /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
+ /// same region of memory as its base.
+ pub fn is_indirect(&self) -> bool {
+ self.projection.iter().any(|elem| elem.is_indirect())
+ }
+
+ /// Returns `true` if this `Place`'s first projection is `Deref`.
+ ///
+ /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
+ /// `Deref` projections can only occur as the first projection. In that case this method
+ /// is equivalent to `is_indirect`, but faster.
+ pub fn is_indirect_first_projection(&self) -> bool {
+ self.as_ref().is_indirect_first_projection()
+ }
+
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
+ #[inline(always)]
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ self.as_ref().local_or_deref_local()
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline(always)]
+ pub fn as_local(&self) -> Option<Local> {
+ self.as_ref().as_local()
+ }
+
+ #[inline]
+ pub fn as_ref(&self) -> PlaceRef<'tcx> {
+ PlaceRef { local: self.local, projection: &self.projection }
+ }
+
+ /// Iterate over the projections in evaluation order, i.e., the first element is the base with
+ /// its projection and then subsequently more projections are added.
+ /// As a concrete example, given the place a.b.c, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.as_ref().iter_projections()
+ }
+
+ /// Generates a new place by appending `more_projections` to the existing ones
+ /// and interning the result.
+ pub fn project_deeper(self, more_projections: &[PlaceElem<'tcx>], tcx: TyCtxt<'tcx>) -> Self {
+ if more_projections.is_empty() {
+ return self;
+ }
+
+ self.as_ref().project_deeper(more_projections, tcx)
+ }
+}
+
+impl From<Local> for Place<'_> {
+ #[inline]
+ fn from(local: Local) -> Self {
+ Place { local, projection: List::empty() }
+ }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] }
+ | PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
+ _ => None,
+ }
+ }
+
+ /// Returns `true` if this `Place` contains a `Deref` projection.
+ ///
+ /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
+ /// same region of memory as its base.
+ pub fn is_indirect(&self) -> bool {
+ self.projection.iter().any(|elem| elem.is_indirect())
+ }
+
+ /// Returns `true` if this `Place`'s first projection is `Deref`.
+ ///
+ /// This is useful because for MIR phases `AnalysisPhase::PostCleanup` and later,
+ /// `Deref` projections can only occur as the first projection. In that case this method
+ /// is equivalent to `is_indirect`, but faster.
+ pub fn is_indirect_first_projection(&self) -> bool {
+ // To make sure this is not accidentally used in the wrong MIR phase
+ debug_assert!(
+ self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref)
+ );
+ self.projection.first() == Some(&PlaceElem::Deref)
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline]
+ pub fn as_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] } => Some(local),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn last_projection(&self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
+ if let &[ref proj_base @ .., elem] = self.projection {
+ Some((PlaceRef { local: self.local, projection: proj_base }, elem))
+ } else {
+ None
+ }
+ }
+
+ /// Iterate over the projections in evaluation order, i.e., the first element is the base with
+ /// its projection and then subsequently more projections are added.
+ /// As a concrete example, given the place a.b.c, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.projection.iter().enumerate().map(move |(i, proj)| {
+ let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
+ (base, *proj)
+ })
+ }
+
+ /// Generates a new place by appending `more_projections` to the existing ones
+ /// and interning the result.
+ pub fn project_deeper(
+ self,
+ more_projections: &[PlaceElem<'tcx>],
+ tcx: TyCtxt<'tcx>,
+ ) -> Place<'tcx> {
+ let mut v: Vec<PlaceElem<'tcx>>;
+
+ let new_projections = if self.projection.is_empty() {
+ more_projections
+ } else {
+ v = Vec::with_capacity(self.projection.len() + more_projections.len());
+ v.extend(self.projection);
+ v.extend(more_projections);
+ &v
+ };
+
+ Place { local: self.local, projection: tcx.mk_place_elems(new_projections) }
+ }
+}
+
+impl From<Local> for PlaceRef<'_> {
+ #[inline]
+ fn from(local: Local) -> Self {
+ PlaceRef { local, projection: &[] }
+ }
+}
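
The `is_indirect`/`iter_projections` pair above can be pictured with a toy place model: a base local plus a list of projection elements, iterated base-first the way the doc comment's `a.b.c` example describes, with `Deref` being the one element that makes a place indirect. Sketch with a made-up `Proj` standing in for `PlaceElem`:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Proj {
    Field(&'static str),
    Deref,
}

// Toy analogue of `PlaceRef::iter_projections`: yield (base-so-far, next projection)
// pairs in evaluation order.
fn iter_projections<'a>(
    projection: &'a [Proj],
) -> impl Iterator<Item = (&'a [Proj], Proj)> + 'a {
    projection.iter().enumerate().map(move |(i, proj)| (&projection[..i], *proj))
}

// Toy analogue of `is_indirect`: only `Deref` can make the place refer to memory
// outside its base local.
fn is_indirect(projection: &[Proj]) -> bool {
    projection.iter().any(|p| matches!(p, Proj::Deref))
}

fn main() {
    // `(*a).b.c` ~ local `a` with projections [Deref, .b, .c]
    let place = [Proj::Deref, Proj::Field("b"), Proj::Field("c")];

    let steps: Vec<_> = iter_projections(&place).collect();
    assert_eq!(steps[0], (&place[..0], Proj::Deref));      // (a, *)
    assert_eq!(steps[1], (&place[..1], Proj::Field("b"))); // (*a, .b)
    assert_eq!(steps[2], (&place[..2], Proj::Field("c"))); // ((*a).b, .c)

    assert!(is_indirect(&place));
    assert!(!is_indirect(&[Proj::Field("b")]));
}
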
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+impl<'tcx> Operand<'tcx> {
+ /// Convenience helper to make a constant that refers to the fn
+ /// with given `DefId` and args. Since this is used to synthesize
+ /// MIR, assumes `user_ty` is None.
+ pub fn function_handle(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ args: impl IntoIterator<Item = GenericArg<'tcx>>,
+ span: Span,
+ ) -> Self {
+ let ty = Ty::new_fn_def(tcx, def_id, args);
+ Operand::Constant(Box::new(ConstOperand {
+ span,
+ user_ty: None,
+ const_: Const::Val(ConstValue::ZeroSized, ty),
+ }))
+ }
+
+ pub fn is_move(&self) -> bool {
+ matches!(self, Operand::Move(..))
+ }
+
+ /// Convenience helper to make a literal-like constant from a given scalar value.
+ /// Since this is used to synthesize MIR, assumes `user_ty` is None.
+ pub fn const_from_scalar(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ val: Scalar,
+ span: Span,
+ ) -> Operand<'tcx> {
+ debug_assert!({
+ let param_env_and_ty = ty::ParamEnv::empty().and(ty);
+ let type_size = tcx
+ .layout_of(param_env_and_ty)
+ .unwrap_or_else(|e| panic!("could not compute layout for {ty:?}: {e:?}"))
+ .size;
+ let scalar_size = match val {
+ Scalar::Int(int) => int.size(),
+ _ => panic!("Invalid scalar type {val:?}"),
+ };
+ scalar_size == type_size
+ });
+ Operand::Constant(Box::new(ConstOperand {
+ span,
+ user_ty: None,
+ const_: Const::Val(ConstValue::Scalar(val), ty),
+ }))
+ }
+
+ pub fn to_copy(&self) -> Self {
+ match *self {
+ Operand::Copy(_) | Operand::Constant(_) => self.clone(),
+ Operand::Move(place) => Operand::Copy(place),
+ }
+ }
+
+ /// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// constant.
+ pub fn place(&self) -> Option<Place<'tcx>> {
+ match self {
+ Operand::Copy(place) | Operand::Move(place) => Some(*place),
+ Operand::Constant(_) => None,
+ }
+ }
+
+ /// Returns the `ConstOperand` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// place.
+ pub fn constant(&self) -> Option<&ConstOperand<'tcx>> {
+ match self {
+ Operand::Constant(x) => Some(&**x),
+ Operand::Copy(_) | Operand::Move(_) => None,
+ }
+ }
+
+ /// Gets the `ty::FnDef` from an operand if it's a constant function item.
+ ///
+ /// While this is unlikely in general, it's the normal case of what you'll
+ /// find as the `func` in a [`TerminatorKind::Call`].
+ pub fn const_fn_def(&self) -> Option<(DefId, GenericArgsRef<'tcx>)> {
+ let const_ty = self.constant()?.const_.ty();
+ if let ty::FnDef(def_id, args) = *const_ty.kind() { Some((def_id, args)) } else { None }
+ }
+}
+
+impl<'tcx> ConstOperand<'tcx> {
+ pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.const_.try_to_scalar() {
+ Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
+ GlobalAlloc::Static(def_id) => {
+ assert!(!tcx.is_thread_local_static(def_id));
+ Some(def_id)
+ }
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.const_.ty()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Rvalues
+
+impl<'tcx> Rvalue<'tcx> {
+ /// Returns true if rvalue can be safely removed when the result is unused.
+ #[inline]
+ pub fn is_safe_to_remove(&self) -> bool {
+ match self {
+ // Pointer to int casts may be side-effects due to exposing the provenance.
+ // While the model is undecided, we should be conservative. See
+ // <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(_)
+ | Rvalue::Repeat(_, _)
+ | Rvalue::Ref(_, _, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::Len(_)
+ | Rvalue::Cast(
+ CastKind::IntToInt
+ | CastKind::FloatToInt
+ | CastKind::FloatToFloat
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
+ | CastKind::PointerCoercion(_)
+ | CastKind::PointerFromExposedAddress
+ | CastKind::DynStar
+ | CastKind::Transmute,
+ _,
+ _,
+ )
+ | Rvalue::BinaryOp(_, _)
+ | Rvalue::CheckedBinaryOp(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::UnaryOp(_, _)
+ | Rvalue::Discriminant(_)
+ | Rvalue::Aggregate(_, _)
+ | Rvalue::ShallowInitBox(_, _) => true,
+ }
+ }
+}
+
+impl BorrowKind {
+ pub fn mutability(&self) -> Mutability {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Fake => Mutability::Not,
+ BorrowKind::Mut { .. } => Mutability::Mut,
+ }
+ }
+
+ pub fn allows_two_phase_borrow(&self) -> bool {
+ match *self {
+ BorrowKind::Shared
+ | BorrowKind::Fake
+ | BorrowKind::Mut { kind: MutBorrowKind::Default | MutBorrowKind::ClosureCapture } => {
+ false
+ }
+ BorrowKind::Mut { kind: MutBorrowKind::TwoPhaseBorrow } => true,
+ }
+ }
+}
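
`MutBorrowKind::TwoPhaseBorrow` (the only kind for which `allows_two_phase_borrow` returns true) corresponds to a pattern accepted in surface Rust precisely because the mutable borrow is reserved early but only activated at the call:

fn main() {
    let mut v = vec![1, 2, 3];
    // The mutable auto-ref of `v` for `push` is reserved before the argument is
    // evaluated but only becomes active at the call itself, so the shared borrow
    // taken by `v.len()` in argument position is still allowed.
    v.push(v.len());
    assert_eq!(v, [1, 2, 3, 3]);
}
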
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index be27bf75d..0b95fdfa1 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -3,7 +3,7 @@
//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
//! The intention is that this file only contains datatype declarations, no code.
-use super::{BasicBlock, Constant, Local, SwitchTargets, UserTypeProjection};
+use super::{BasicBlock, Const, Local, UserTypeProjection};
use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::traits::Reveal;
@@ -24,6 +24,7 @@ use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::Symbol;
use rustc_span::Span;
use rustc_target::asm::InlineAsmRegOrRegClass;
+use smallvec::SmallVec;
/// Represents the "flavors" of MIR.
///
@@ -122,7 +123,7 @@ pub enum AnalysisPhase {
/// * [`TerminatorKind::FalseEdge`]
/// * [`StatementKind::FakeRead`]
/// * [`StatementKind::AscribeUserType`]
- /// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
+ /// * [`Rvalue::Ref`] with `BorrowKind::Fake`
///
/// Furthermore, `Deref` projections must be the first projection within any place (if they
/// appear at all)
@@ -138,6 +139,7 @@ pub enum RuntimePhase {
/// * [`TerminatorKind::Yield`]
/// * [`TerminatorKind::GeneratorDrop`]
/// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
+ /// * [`PlaceElem::OpaqueCast`]
///
/// And the following variants are allowed:
/// * [`StatementKind::Retag`]
@@ -180,7 +182,7 @@ pub enum BorrowKind {
/// should not prevent `if let None = x { ... }`, for example, because the
/// mutating `(*x as Some).0` can't affect the discriminant of `x`.
/// We can also report errors with this kind of borrow differently.
- Shallow,
+ Fake,
/// Data is mutable and not aliasable.
Mut { kind: MutBorrowKind },
@@ -380,6 +382,28 @@ pub enum StatementKind<'tcx> {
Nop,
}
+impl StatementKind<'_> {
+ /// Returns a simple string representation of a `StatementKind` variant, independent of any
+ /// values it might hold (e.g. `StatementKind::Assign` always returns `"Assign"`).
+ pub const fn name(&self) -> &'static str {
+ match self {
+ StatementKind::Assign(..) => "Assign",
+ StatementKind::FakeRead(..) => "FakeRead",
+ StatementKind::SetDiscriminant { .. } => "SetDiscriminant",
+ StatementKind::Deinit(..) => "Deinit",
+ StatementKind::StorageLive(..) => "StorageLive",
+ StatementKind::StorageDead(..) => "StorageDead",
+ StatementKind::Retag(..) => "Retag",
+ StatementKind::PlaceMention(..) => "PlaceMention",
+ StatementKind::AscribeUserType(..) => "AscribeUserType",
+ StatementKind::Coverage(..) => "Coverage",
+ StatementKind::Intrinsic(..) => "Intrinsic",
+ StatementKind::ConstEvalCounter => "ConstEvalCounter",
+ StatementKind::Nop => "Nop",
+ }
+ }
+}
+
#[derive(
Clone,
TyEncodable,
@@ -416,17 +440,6 @@ pub enum NonDivergingIntrinsic<'tcx> {
CopyNonOverlapping(CopyNonOverlapping<'tcx>),
}
-impl std::fmt::Display for NonDivergingIntrinsic<'_> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Self::Assume(op) => write!(f, "assume({op:?})"),
- Self::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => {
- write!(f, "copy_nonoverlapping(dst = {dst:?}, src = {src:?}, count = {count:?})")
- }
- }
- }
-}
-
/// Describes what kind of retag is to be performed.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
@@ -593,13 +606,13 @@ pub enum TerminatorKind<'tcx> {
///
/// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after
/// deaggregation runs.
- Resume,
+ UnwindResume,
/// Indicates that the landing pad is finished and that the process should terminate.
///
/// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
/// cleanup blocks.
- Terminate,
+ UnwindTerminate(UnwindTerminateReason),
/// Returns from the function.
///
@@ -790,8 +803,8 @@ impl TerminatorKind<'_> {
match self {
TerminatorKind::Goto { .. } => "Goto",
TerminatorKind::SwitchInt { .. } => "SwitchInt",
- TerminatorKind::Resume => "Resume",
- TerminatorKind::Terminate => "Terminate",
+ TerminatorKind::UnwindResume => "UnwindResume",
+ TerminatorKind::UnwindTerminate(_) => "UnwindTerminate",
TerminatorKind::Return => "Return",
TerminatorKind::Unreachable => "Unreachable",
TerminatorKind::Drop { .. } => "Drop",
@@ -806,6 +819,27 @@ impl TerminatorKind<'_> {
}
}
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub struct SwitchTargets {
+ /// Possible values. The locations to branch to in each case
+ /// are found in the corresponding indices from the `targets` vector.
+ pub(super) values: SmallVec<[u128; 1]>,
+
+ /// Possible branch sites. The last element of this vector is used
+ /// for the otherwise branch, so targets.len() == values.len() + 1
+ /// should hold.
+ //
+ // This invariant is quite non-obvious and also could be improved.
+ // One way to enforce this invariant would be to have something like this instead:
+ //
+ // branches: Vec<(ConstInt, BasicBlock)>,
+ // otherwise: Option<BasicBlock> // exhaustive if None
+ //
+ // However we’ve decided to keep this as-is until we find a case
+ // where some other approach seems to be strictly better than the alternatives.
+ pub(super) targets: SmallVec<[BasicBlock; 2]>,
+}
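
The invariant spelled out above (`targets.len() == values.len() + 1`, with the last entry being the otherwise block) is easiest to see on a toy version of the structure, using plain `Vec`s and `usize` block indices in place of `SmallVec` and `BasicBlock`:

// Toy model of the `SwitchTargets` invariant described in the comments above.
struct Targets {
    values: Vec<u128>,
    targets: Vec<usize>, // stand-in for BasicBlock indices
}

impl Targets {
    fn new(branches: impl Iterator<Item = (u128, usize)>, otherwise: usize) -> Self {
        let (values, mut targets): (Vec<_>, Vec<_>) = branches.unzip();
        targets.push(otherwise);
        Targets { values, targets }
    }

    fn target_for_value(&self, value: u128) -> usize {
        self.values
            .iter()
            .position(|&v| v == value)
            .map_or(*self.targets.last().unwrap(), |i| self.targets[i])
    }
}

fn main() {
    // switchInt(x): 0 => bb1, 1 => bb2, otherwise => bb3
    let t = Targets::new([(0, 1), (1, 2)].into_iter(), 3);
    assert_eq!(t.targets.len(), t.values.len() + 1);
    assert_eq!(t.target_for_value(1), 2);
    assert_eq!(t.target_for_value(7), 3);
}
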
+
/// Action to be taken when a stack unwind happens.
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
@@ -820,11 +854,22 @@ pub enum UnwindAction {
/// Terminates the execution if unwind happens.
///
/// Depending on the platform and situation this may cause a non-unwindable panic or abort.
- Terminate,
+ Terminate(UnwindTerminateReason),
/// Cleanups to be done.
Cleanup(BasicBlock),
}
+/// The reason we are terminating the process during unwinding.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum UnwindTerminateReason {
+ /// Unwinding is just not possible given the ABI of this function.
+ Abi,
+ /// We were already cleaning up for an ongoing unwind, and a *second*, *nested* unwind was
+ /// triggered by the drop glue.
+ InCleanup,
+}
+
/// Information about an assertion failure.
#[derive(Clone, Hash, HashStable, PartialEq, Debug)]
#[derive(TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
@@ -858,10 +903,10 @@ pub enum InlineAsmOperand<'tcx> {
out_place: Option<Place<'tcx>>,
},
Const {
- value: Box<Constant<'tcx>>,
+ value: Box<ConstOperand<'tcx>>,
},
SymFn {
- value: Box<Constant<'tcx>>,
+ value: Box<ConstOperand<'tcx>>,
},
SymStatic {
def_id: DefId,
@@ -1030,6 +1075,18 @@ pub enum ProjectionElem<V, T> {
/// Like an explicit cast from an opaque type to a concrete type, but without
/// requiring an intermediate variable.
OpaqueCast(T),
+
+ /// A `Subtype(T)` projection is applied to any `StatementKind::Assign` where the
+ /// type of the lvalue doesn't match the type of the rvalue; the primary goal is to make
+ /// subtyping explicit during optimizations and codegen.
+ ///
+ /// This projection doesn't impact the runtime behavior of the program except for potentially changing
+ /// some type metadata of the interpreter or codegen backend.
+ ///
+ /// This goal is achieved with the `Subtyper` mir_transform pass, which runs right after the
+ /// borrow checker, as we only care about subtyping that can affect trait selection and
+ /// `TypeId`.
+ Subtype(T),
}
/// Alias for projections as they appear in places, where the base is a place
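
One surface-Rust situation that produces the lvalue/rvalue subtyping described for `Subtype` is assigning a higher-ranked fn pointer to a binding declared at a less general fn-pointer type. The snippet below compiles purely by subtyping at the assignment; whether a `Subtype` projection actually shows up in its MIR depends on the pass pipeline, so treat that part as indicative only:

fn takes_ref(_: &i32) {}

fn main() {
    // Rvalue type: `for<'a> fn(&'a i32)`; lvalue type: `fn(&'static i32)`.
    // The assignment is accepted because the former is a subtype of the latter.
    let f: fn(&'static i32) = takes_ref;
    f(&5);
}
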
@@ -1081,7 +1138,22 @@ pub enum Operand<'tcx> {
Move(Place<'tcx>),
/// Constants are already semantically values, and remain unchanged.
- Constant(Box<Constant<'tcx>>),
+ Constant(Box<ConstOperand<'tcx>>),
+}
+
+#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ConstOperand<'tcx> {
+ pub span: Span,
+
+ /// Optional user-given type: for something like
+ /// `collect::<Vec<_>>`, this would be present and would
+ /// indicate that `Vec<_>` was explicitly specified.
+ ///
+ /// Needed for NLL to impose user-given type constraints.
+ pub user_ty: Option<UserTypeAnnotationIndex>,
+
+ pub const_: Const<'tcx>,
}
///////////////////////////////////////////////////////////////////////////
@@ -1274,7 +1346,7 @@ pub enum AggregateKind<'tcx> {
Generator(DefId, GenericArgsRef<'tcx>, hir::Movability),
}
-#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
pub enum NullOp<'tcx> {
/// Returns the size of a value of that type
SizeOf,
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index f79697936..7df25fc5c 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -69,7 +69,7 @@ impl<'tcx> PlaceTy<'tcx> {
param_env: ty::ParamEnv<'tcx>,
elem: &ProjectionElem<V, T>,
mut handle_field: impl FnMut(&Self, FieldIdx, T) -> Ty<'tcx>,
- mut handle_opaque_cast: impl FnMut(&Self, T) -> Ty<'tcx>,
+ mut handle_opaque_cast_and_subtype: impl FnMut(&Self, T) -> Ty<'tcx>,
) -> PlaceTy<'tcx>
where
V: ::std::fmt::Debug,
@@ -110,7 +110,12 @@ impl<'tcx> PlaceTy<'tcx> {
PlaceTy { ty: self.ty, variant_index: Some(index) }
}
ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
- ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(handle_opaque_cast(&self, ty)),
+ ProjectionElem::OpaqueCast(ty) => {
+ PlaceTy::from_ty(handle_opaque_cast_and_subtype(&self, ty))
+ }
+ ProjectionElem::Subtype(ty) => {
+ PlaceTy::from_ty(handle_opaque_cast_and_subtype(&self, ty))
+ }
};
debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
answer
@@ -227,7 +232,7 @@ impl<'tcx> Operand<'tcx> {
{
match self {
&Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
- Operand::Constant(c) => c.literal.ty(),
+ Operand::Constant(c) => c.const_.ty(),
}
}
}
@@ -273,7 +278,7 @@ impl BorrowKind {
// We have no type corresponding to a shallow borrow, so use
// `&` as an approximation.
- BorrowKind::Shallow => hir::Mutability::Not,
+ BorrowKind::Fake => hir::Mutability::Not,
}
}
}
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
index 1f878d23b..02aab4a89 100644
--- a/compiler/rustc_middle/src/mir/terminator.rs
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -1,38 +1,16 @@
+/// Functionality for terminators and helper types that appear in terminators.
+use rustc_hir::LangItem;
use smallvec::SmallVec;
use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind, UnwindAction};
-use rustc_ast::InlineAsmTemplatePiece;
pub use rustc_ast::Mutability;
use rustc_macros::HashStable;
-use std::borrow::Cow;
-use std::fmt::{self, Debug, Formatter, Write};
use std::iter;
use std::slice;
pub use super::query::*;
use super::*;
-#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
-pub struct SwitchTargets {
- /// Possible values. The locations to branch to in each case
- /// are found in the corresponding indices from the `targets` vector.
- values: SmallVec<[u128; 1]>,
-
- /// Possible branch sites. The last element of this vector is used
- /// for the otherwise branch, so targets.len() == values.len() + 1
- /// should hold.
- //
- // This invariant is quite non-obvious and also could be improved.
- // One way to make this invariant is to have something like this instead:
- //
- // branches: Vec<(ConstInt, BasicBlock)>,
- // otherwise: Option<BasicBlock> // exhaustive if None
- //
- // However we’ve decided to keep this as-is until we figure a case
- // where some other approach seems to be strictly better than other.
- targets: SmallVec<[BasicBlock; 2]>,
-}
-
impl SwitchTargets {
/// Creates switch targets from an iterator of values and target blocks.
///
@@ -100,6 +78,202 @@ impl<'a> Iterator for SwitchTargetsIter<'a> {
impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
+impl UnwindAction {
+ fn cleanup_block(self) -> Option<BasicBlock> {
+ match self {
+ UnwindAction::Cleanup(bb) => Some(bb),
+ UnwindAction::Continue | UnwindAction::Unreachable | UnwindAction::Terminate(_) => None,
+ }
+ }
+}
+
+impl UnwindTerminateReason {
+ pub fn as_str(self) -> &'static str {
+ // Keep this in sync with the messages in `core/src/panicking.rs`.
+ match self {
+ UnwindTerminateReason::Abi => "panic in a function that cannot unwind",
+ UnwindTerminateReason::InCleanup => "panic in a destructor during cleanup",
+ }
+ }
+
+ /// A short representation of this used for MIR printing.
+ pub fn as_short_str(self) -> &'static str {
+ match self {
+ UnwindTerminateReason::Abi => "abi",
+ UnwindTerminateReason::InCleanup => "cleanup",
+ }
+ }
+
+ pub fn lang_item(self) -> LangItem {
+ match self {
+ UnwindTerminateReason::Abi => LangItem::PanicCannotUnwind,
+ UnwindTerminateReason::InCleanup => LangItem::PanicInCleanup,
+ }
+ }
+}
+
+impl<O> AssertKind<O> {
+ /// Returns true if this is an overflow checking assertion controlled by -C overflow-checks.
+ pub fn is_optional_overflow_check(&self) -> bool {
+ use AssertKind::*;
+ use BinOp::*;
+ matches!(self, OverflowNeg(..) | Overflow(Add | Sub | Mul | Shl | Shr, ..))
+ }
+
+ /// Get the message that is printed at runtime when this assertion fails.
+ ///
+ /// The caller is expected to handle `BoundsCheck` and `MisalignedPointerDereference` by
+ /// invoking the appropriate lang item (panic_bounds_check/panic_misaligned_pointer_dereference)
+ /// instead of printing a static message.
+ pub fn description(&self) -> &'static str {
+ use AssertKind::*;
+ match self {
+ Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
+ Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
+ Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
+ Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
+ Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
+ OverflowNeg(_) => "attempt to negate with overflow",
+ Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
+ Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
+ Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
+ DivisionByZero(_) => "attempt to divide by zero",
+ RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
+ ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
+ ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
+ ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
+ ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
+ BoundsCheck { .. } | MisalignedPointerDereference { .. } => {
+ bug!("Unexpected AssertKind")
+ }
+ }
+ }
+
+ /// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
+ ///
+ /// Needs to be kept in sync with the run-time behavior (which is defined by
+ /// `AssertKind::description` and the lang items mentioned in its docs).
+ /// Note that we deliberately show more details here than we do at runtime, such as the actual
+ /// numbers that overflowed -- it is much easier to do so here than at runtime.
+ pub fn fmt_assert_args<W: fmt::Write>(&self, f: &mut W) -> fmt::Result
+ where
+ O: Debug,
+ {
+ use AssertKind::*;
+ match self {
+ BoundsCheck { ref len, ref index } => write!(
+ f,
+ "\"index out of bounds: the length is {{}} but the index is {{}}\", {len:?}, {index:?}"
+ ),
+
+ OverflowNeg(op) => {
+ write!(f, "\"attempt to negate `{{}}`, which would overflow\", {op:?}")
+ }
+ DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {op:?}"),
+ RemainderByZero(op) => write!(
+ f,
+ "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {op:?}"
+ ),
+ Overflow(BinOp::Add, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} + {{}}`, which would overflow\", {l:?}, {r:?}"
+ ),
+ Overflow(BinOp::Sub, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} - {{}}`, which would overflow\", {l:?}, {r:?}"
+ ),
+ Overflow(BinOp::Mul, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} * {{}}`, which would overflow\", {l:?}, {r:?}"
+ ),
+ Overflow(BinOp::Div, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} / {{}}`, which would overflow\", {l:?}, {r:?}"
+ ),
+ Overflow(BinOp::Rem, l, r) => write!(
+ f,
+ "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {l:?}, {r:?}"
+ ),
+ Overflow(BinOp::Shr, _, r) => {
+ write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {r:?}")
+ }
+ Overflow(BinOp::Shl, _, r) => {
+ write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {r:?}")
+ }
+ MisalignedPointerDereference { required, found } => {
+ write!(
+ f,
+ "\"misaligned pointer dereference: address must be a multiple of {{}} but is {{}}\", {required:?}, {found:?}"
+ )
+ }
+ _ => write!(f, "\"{}\"", self.description()),
+ }
+ }
+
+ /// Format the diagnostic message for use in a lint (e.g. when the assertion fails during const-eval).
+ ///
+ /// Needs to be kept in sync with the run-time behavior (which is defined by
+ /// `AssertKind::description` and the lang items mentioned in its docs).
+ /// Note that we deliberately show more details here than we do at runtime, such as the actual
+ /// numbers that overflowed -- it is much easier to do so here than at runtime.
+ pub fn diagnostic_message(&self) -> DiagnosticMessage {
+ use crate::fluent_generated::*;
+ use AssertKind::*;
+
+ match self {
+ BoundsCheck { .. } => middle_bounds_check,
+ Overflow(BinOp::Shl, _, _) => middle_assert_shl_overflow,
+ Overflow(BinOp::Shr, _, _) => middle_assert_shr_overflow,
+ Overflow(_, _, _) => middle_assert_op_overflow,
+ OverflowNeg(_) => middle_assert_overflow_neg,
+ DivisionByZero(_) => middle_assert_divide_by_zero,
+ RemainderByZero(_) => middle_assert_remainder_by_zero,
+ ResumedAfterReturn(GeneratorKind::Async(_)) => middle_assert_async_resume_after_return,
+ ResumedAfterReturn(GeneratorKind::Gen) => middle_assert_generator_resume_after_return,
+ ResumedAfterPanic(GeneratorKind::Async(_)) => middle_assert_async_resume_after_panic,
+ ResumedAfterPanic(GeneratorKind::Gen) => middle_assert_generator_resume_after_panic,
+
+ MisalignedPointerDereference { .. } => middle_assert_misaligned_ptr_deref,
+ }
+ }
+
+ pub fn add_args(self, adder: &mut dyn FnMut(Cow<'static, str>, DiagnosticArgValue<'static>))
+ where
+ O: fmt::Debug,
+ {
+ use AssertKind::*;
+
+ macro_rules! add {
+ ($name: expr, $value: expr) => {
+ adder($name.into(), $value.into_diagnostic_arg());
+ };
+ }
+
+ match self {
+ BoundsCheck { len, index } => {
+ add!("len", format!("{len:?}"));
+ add!("index", format!("{index:?}"));
+ }
+ Overflow(BinOp::Shl | BinOp::Shr, _, val)
+ | DivisionByZero(val)
+ | RemainderByZero(val)
+ | OverflowNeg(val) => {
+ add!("val", format!("{val:#?}"));
+ }
+ Overflow(binop, left, right) => {
+ add!("op", binop.to_hir_binop().as_str());
+ add!("left", format!("{left:#?}"));
+ add!("right", format!("{right:#?}"));
+ }
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => {}
+ MisalignedPointerDereference { required, found } => {
+ add!("required", format!("{required:#?}"));
+ add!("found", format!("{found:#?}"));
+ }
+ }
+ }
+}
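
Note the doubled braces throughout `fmt_assert_args`: they emit literal `{}` placeholders, so the rendered `assert(..)` terminator shows the panic format string alongside its MIR operands instead of formatting the operands into the message. A standalone check of that escaping behaviour:

use std::fmt::Write;

fn main() {
    let (l, r) = (1_i32, 2_i32);
    let mut s = String::new();
    // `{{` / `}}` escape to literal braces; only `{l:?}` and `{r:?}` are interpolated here.
    write!(s, "\"attempt to compute `{{}} + {{}}`, which would overflow\", {l:?}, {r:?}").unwrap();
    assert_eq!(s, r#""attempt to compute `{} + {}`, which would overflow", 1, 2"#);
}
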
+
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Terminator<'tcx> {
pub source_info: SourceInfo,
@@ -155,8 +329,8 @@ impl<'tcx> TerminatorKind<'tcx> {
| InlineAsm { destination: Some(t), unwind: _, .. } => {
Some(t).into_iter().chain((&[]).into_iter().copied())
}
- Resume
- | Terminate
+ UnwindResume
+ | UnwindTerminate(_)
| GeneratorDrop
| Return
| Unreachable
@@ -197,8 +371,8 @@ impl<'tcx> TerminatorKind<'tcx> {
| InlineAsm { destination: Some(ref mut t), unwind: _, .. } => {
Some(t).into_iter().chain(&mut [])
}
- Resume
- | Terminate
+ UnwindResume
+ | UnwindTerminate(_)
| GeneratorDrop
| Return
| Unreachable
@@ -214,8 +388,8 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn unwind(&self) -> Option<&UnwindAction> {
match *self {
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
@@ -233,8 +407,8 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn unwind_mut(&mut self) -> Option<&mut UnwindAction> {
match *self {
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
@@ -264,174 +438,6 @@ impl<'tcx> TerminatorKind<'tcx> {
}
}
-impl<'tcx> Debug for TerminatorKind<'tcx> {
- fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
- self.fmt_head(fmt)?;
- let successor_count = self.successors().count();
- let labels = self.fmt_successor_labels();
- assert_eq!(successor_count, labels.len());
-
- let unwind = match self.unwind() {
- // Not needed or included in successors
- None | Some(UnwindAction::Cleanup(_)) => None,
- Some(UnwindAction::Continue) => Some("unwind continue"),
- Some(UnwindAction::Unreachable) => Some("unwind unreachable"),
- Some(UnwindAction::Terminate) => Some("unwind terminate"),
- };
-
- match (successor_count, unwind) {
- (0, None) => Ok(()),
- (0, Some(unwind)) => write!(fmt, " -> {unwind}"),
- (1, None) => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
- _ => {
- write!(fmt, " -> [")?;
- for (i, target) in self.successors().enumerate() {
- if i > 0 {
- write!(fmt, ", ")?;
- }
- write!(fmt, "{}: {:?}", labels[i], target)?;
- }
- if let Some(unwind) = unwind {
- write!(fmt, ", {unwind}")?;
- }
- write!(fmt, "]")
- }
- }
- }
-}
-
-impl<'tcx> TerminatorKind<'tcx> {
- /// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
- /// successor basic block, if any. The only information not included is the list of possible
- /// successors, which may be rendered differently between the text and the graphviz format.
- pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
- use self::TerminatorKind::*;
- match self {
- Goto { .. } => write!(fmt, "goto"),
- SwitchInt { discr, .. } => write!(fmt, "switchInt({discr:?})"),
- Return => write!(fmt, "return"),
- GeneratorDrop => write!(fmt, "generator_drop"),
- Resume => write!(fmt, "resume"),
- Terminate => write!(fmt, "abort"),
- Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"),
- Unreachable => write!(fmt, "unreachable"),
- Drop { place, .. } => write!(fmt, "drop({place:?})"),
- Call { func, args, destination, .. } => {
- write!(fmt, "{destination:?} = ")?;
- write!(fmt, "{func:?}(")?;
- for (index, arg) in args.iter().enumerate() {
- if index > 0 {
- write!(fmt, ", ")?;
- }
- write!(fmt, "{arg:?}")?;
- }
- write!(fmt, ")")
- }
- Assert { cond, expected, msg, .. } => {
- write!(fmt, "assert(")?;
- if !expected {
- write!(fmt, "!")?;
- }
- write!(fmt, "{cond:?}, ")?;
- msg.fmt_assert_args(fmt)?;
- write!(fmt, ")")
- }
- FalseEdge { .. } => write!(fmt, "falseEdge"),
- FalseUnwind { .. } => write!(fmt, "falseUnwind"),
- InlineAsm { template, ref operands, options, .. } => {
- write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
- for op in operands {
- write!(fmt, ", ")?;
- let print_late = |&late| if late { "late" } else { "" };
- match op {
- InlineAsmOperand::In { reg, value } => {
- write!(fmt, "in({reg}) {value:?}")?;
- }
- InlineAsmOperand::Out { reg, late, place: Some(place) } => {
- write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
- }
- InlineAsmOperand::Out { reg, late, place: None } => {
- write!(fmt, "{}out({}) _", print_late(late), reg)?;
- }
- InlineAsmOperand::InOut {
- reg,
- late,
- in_value,
- out_place: Some(out_place),
- } => {
- write!(
- fmt,
- "in{}out({}) {:?} => {:?}",
- print_late(late),
- reg,
- in_value,
- out_place
- )?;
- }
- InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
- write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
- }
- InlineAsmOperand::Const { value } => {
- write!(fmt, "const {value:?}")?;
- }
- InlineAsmOperand::SymFn { value } => {
- write!(fmt, "sym_fn {value:?}")?;
- }
- InlineAsmOperand::SymStatic { def_id } => {
- write!(fmt, "sym_static {def_id:?}")?;
- }
- }
- }
- write!(fmt, ", options({options:?}))")
- }
- }
- }
-
- /// Returns the list of labels for the edges to the successor basic blocks.
- pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
- use self::TerminatorKind::*;
- match *self {
- Return | Resume | Terminate | Unreachable | GeneratorDrop => vec![],
- Goto { .. } => vec!["".into()],
- SwitchInt { ref targets, .. } => targets
- .values
- .iter()
- .map(|&u| Cow::Owned(u.to_string()))
- .chain(iter::once("otherwise".into()))
- .collect(),
- Call { target: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
- vec!["return".into(), "unwind".into()]
- }
- Call { target: Some(_), unwind: _, .. } => vec!["return".into()],
- Call { target: None, unwind: UnwindAction::Cleanup(_), .. } => vec!["unwind".into()],
- Call { target: None, unwind: _, .. } => vec![],
- Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
- Yield { drop: None, .. } => vec!["resume".into()],
- Drop { unwind: UnwindAction::Cleanup(_), .. } => vec!["return".into(), "unwind".into()],
- Drop { unwind: _, .. } => vec!["return".into()],
- Assert { unwind: UnwindAction::Cleanup(_), .. } => {
- vec!["success".into(), "unwind".into()]
- }
- Assert { unwind: _, .. } => vec!["success".into()],
- FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
- FalseUnwind { unwind: UnwindAction::Cleanup(_), .. } => {
- vec!["real".into(), "unwind".into()]
- }
- FalseUnwind { unwind: _, .. } => vec!["real".into()],
- InlineAsm { destination: Some(_), unwind: UnwindAction::Cleanup(_), .. } => {
- vec!["return".into(), "unwind".into()]
- }
- InlineAsm { destination: Some(_), unwind: _, .. } => {
- vec!["return".into()]
- }
- InlineAsm { destination: None, unwind: UnwindAction::Cleanup(_), .. } => {
- vec!["unwind".into()]
- }
- InlineAsm { destination: None, unwind: _, .. } => vec![],
- }
- }
-}
-
#[derive(Copy, Clone, Debug)]
pub enum TerminatorEdges<'mir, 'tcx> {
/// For terminators that have no successor, like `return`.
@@ -443,7 +449,8 @@ pub enum TerminatorEdges<'mir, 'tcx> {
/// Special action for `Yield`, `Call` and `InlineAsm` terminators.
AssignOnReturn {
return_: Option<BasicBlock>,
- unwind: UnwindAction,
+ /// The cleanup block, if it exists.
+ cleanup: Option<BasicBlock>,
place: CallReturnPlaces<'mir, 'tcx>,
},
/// Special edge for `SwitchInt`.
@@ -486,7 +493,9 @@ impl<'tcx> TerminatorKind<'tcx> {
pub fn edges(&self) -> TerminatorEdges<'_, 'tcx> {
use TerminatorKind::*;
match *self {
- Return | Resume | Terminate | GeneratorDrop | Unreachable => TerminatorEdges::None,
+ Return | UnwindResume | UnwindTerminate(_) | GeneratorDrop | Unreachable => {
+ TerminatorEdges::None
+ }
Goto { target } => TerminatorEdges::Single(target),
@@ -494,7 +503,7 @@ impl<'tcx> TerminatorKind<'tcx> {
| Drop { target, unwind, place: _, replace: _ }
| FalseUnwind { real_target: target, unwind } => match unwind {
UnwindAction::Cleanup(unwind) => TerminatorEdges::Double(target, unwind),
- UnwindAction::Continue | UnwindAction::Terminate | UnwindAction::Unreachable => {
+ UnwindAction::Continue | UnwindAction::Terminate(_) | UnwindAction::Unreachable => {
TerminatorEdges::Single(target)
}
},
@@ -506,7 +515,7 @@ impl<'tcx> TerminatorKind<'tcx> {
Yield { resume: target, drop, resume_arg, value: _ } => {
TerminatorEdges::AssignOnReturn {
return_: Some(target),
- unwind: drop.map_or(UnwindAction::Terminate, UnwindAction::Cleanup),
+ cleanup: drop,
place: CallReturnPlaces::Yield(resume_arg),
}
}
@@ -514,7 +523,7 @@ impl<'tcx> TerminatorKind<'tcx> {
Call { unwind, destination, target, func: _, args: _, fn_span: _, call_source: _ } => {
TerminatorEdges::AssignOnReturn {
return_: target,
- unwind,
+ cleanup: unwind.cleanup_block(),
place: CallReturnPlaces::Call(destination),
}
}
@@ -528,7 +537,7 @@ impl<'tcx> TerminatorKind<'tcx> {
unwind,
} => TerminatorEdges::AssignOnReturn {
return_: destination,
- unwind,
+ cleanup: unwind.cleanup_block(),
place: CallReturnPlaces::InlineAsm(operands),
},
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
index ec16a8470..a1ff8410e 100644
--- a/compiler/rustc_middle/src/mir/traversal.rs
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -41,6 +41,12 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> {
}
}
+/// Preorder traversal of a graph.
+///
+/// This function creates an iterator over the `Body`'s basic blocks, that
+/// returns basic blocks in a preorder.
+///
+/// See [`Preorder`]'s docs to learn what preorder traversal is.
pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
Preorder::new(body, START_BLOCK)
}
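For intuition, here is a standalone sketch of the preorder walk that `Preorder` performs, over a toy adjacency-list CFG; it also shows that the visited set doubles as the set of reachable blocks, which is the trick `reachable_as_bitset` below relies on. This is an illustration of the algorithm, not the rustc iterator:

```rust
use std::collections::HashSet;

// Toy CFG: block index -> successor blocks. Block 0 plays the role of START_BLOCK.
fn preorder(successors: &[Vec<usize>]) -> (Vec<usize>, HashSet<usize>) {
    let mut visited = HashSet::new();
    let mut order = Vec::new();
    let mut stack = vec![0];
    while let Some(bb) = stack.pop() {
        if visited.insert(bb) {
            order.push(bb); // a block is yielded before its successors: preorder
            stack.extend(&successors[bb]);
        }
    }
    (order, visited)
}

fn main() {
    // 0 -> {1, 2}, 1 -> {3}, 2 -> {3}, 3 -> {}, and 4 is unreachable.
    let cfg = vec![vec![1, 2], vec![3], vec![3], vec![], vec![]];
    let (order, visited) = preorder(&cfg);
    assert_eq!(order[0], 0);
    // Like `reachable_as_bitset`, the visited set doubles as "reachable blocks".
    assert!(!visited.contains(&4));
}
```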
@@ -178,7 +184,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
// When we yield `C` and call `traverse_successor`, we push `B` to the stack, but
// since we've already visited `E`, that child isn't added to the stack. The last
// two iterations yield `B` and finally `A` for a final traversal of [E, D, C, B, A]
- while let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() && let Some(bb) = iter.next_back() {
+ while let Some(bb) = self.visit_stack.last_mut().and_then(|(_, iter)| iter.next_back()) {
if self.visited.insert(bb) {
if let Some(term) = &self.basic_blocks[bb].terminator {
self.visit_stack.push((bb, term.successors()));
@@ -188,16 +194,14 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> {
}
}
-impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
- type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+impl<'tcx> Iterator for Postorder<'_, 'tcx> {
+ type Item = BasicBlock;
- fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
- let next = self.visit_stack.pop();
- if next.is_some() {
- self.traverse_successor();
- }
+ fn next(&mut self) -> Option<BasicBlock> {
+ let (bb, _) = self.visit_stack.pop()?;
+ self.traverse_successor();
- next.map(|(bb, _)| (bb, &self.basic_blocks[bb]))
+ Some(bb)
}
fn size_hint(&self) -> (usize, Option<usize>) {
@@ -215,10 +219,14 @@ impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
}
}
-/// Creates an iterator over the `Body`'s basic blocks, that:
+/// Postorder traversal of a graph.
+///
+/// This function creates an iterator over the `Body`'s basic blocks, that:
/// - returns basic blocks in a postorder,
/// - traverses the `BasicBlocks` CFG cache's reverse postorder backwards, and does not cache the
/// postorder itself.
+///
+/// See [`Postorder`]'s docs to learn what postorder traversal is.
pub fn postorder<'a, 'tcx>(
body: &'a Body<'tcx>,
) -> impl Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> + ExactSizeIterator + DoubleEndedIterator
@@ -226,7 +234,28 @@ pub fn postorder<'a, 'tcx>(
reverse_postorder(body).rev()
}
-/// Reverse postorder traversal of a graph
+/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular
+/// order.
+///
+/// This is clearer than writing `preorder` in cases where the order doesn't matter.
+pub fn reachable<'a, 'tcx>(
+ body: &'a Body<'tcx>,
+) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> {
+ preorder(body)
+}
+
+/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`.
+pub fn reachable_as_bitset(body: &Body<'_>) -> BitSet<BasicBlock> {
+ let mut iter = preorder(body);
+ iter.by_ref().for_each(drop);
+ iter.visited
+}
+
+/// Reverse postorder traversal of a graph.
+///
+/// This function creates an iterator over the `Body`'s basic blocks, that:
+/// - returns basic blocks in a reverse postorder,
+/// - makes use of the `BasicBlocks` CFG cache's reverse postorder.
///
/// Reverse postorder is the reverse order of a postorder traversal.
/// This is different to a preorder traversal and represents a natural
@@ -246,65 +275,6 @@ pub fn postorder<'a, 'tcx>(
/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D`
/// Note that for a graph containing no loops (i.e., A DAG), this is equivalent to
/// a topological sort.
-///
-/// Construction of a `ReversePostorder` traversal requires doing a full
-/// postorder traversal of the graph, therefore this traversal should be
-/// constructed as few times as possible. Use the `reset` method to be able
-/// to re-use the traversal
-#[derive(Clone)]
-pub struct ReversePostorder<'a, 'tcx> {
- body: &'a Body<'tcx>,
- blocks: Vec<BasicBlock>,
- idx: usize,
-}
-
-impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
- pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
- let blocks: Vec<_> = Postorder::new(&body.basic_blocks, root).map(|(bb, _)| bb).collect();
- let len = blocks.len();
- ReversePostorder { body, blocks, idx: len }
- }
-}
-
-impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
- type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
-
- fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
- if self.idx == 0 {
- return None;
- }
- self.idx -= 1;
-
- self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- (self.idx, Some(self.idx))
- }
-}
-
-impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {}
-
-/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular
-/// order.
-///
-/// This is clearer than writing `preorder` in cases where the order doesn't matter.
-pub fn reachable<'a, 'tcx>(
- body: &'a Body<'tcx>,
-) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> {
- preorder(body)
-}
-
-/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`.
-pub fn reachable_as_bitset(body: &Body<'_>) -> BitSet<BasicBlock> {
- let mut iter = preorder(body);
- (&mut iter).for_each(drop);
- iter.visited
-}
-
-/// Creates an iterator over the `Body`'s basic blocks, that:
-/// - returns basic blocks in a reverse postorder,
-/// - makes use of the `BasicBlocks` CFG cache's reverse postorder.
pub fn reverse_postorder<'a, 'tcx>(
body: &'a Body<'tcx>,
) -> impl Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> + ExactSizeIterator + DoubleEndedIterator
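The doc comment above states that for a loop-free CFG a reverse postorder is a topological sort. A small self-contained check of that claim, taking the four-block graph to be the usual diamond A -> {B, C}, B -> D, C -> D (which is consistent with the orders quoted above); this sketches the traversal order only, not the cached rustc implementation:

```rust
// Diamond CFG: A -> {B, C}, B -> {D}, C -> {D}. Indices: A = 0, B = 1, C = 2, D = 3.
fn postorder(successors: &[Vec<usize>]) -> Vec<usize> {
    fn visit(bb: usize, successors: &[Vec<usize>], seen: &mut Vec<bool>, out: &mut Vec<usize>) {
        if seen[bb] {
            return;
        }
        seen[bb] = true;
        for &succ in &successors[bb] {
            visit(succ, successors, seen, out);
        }
        out.push(bb); // a block is emitted after all of its successors: postorder
    }
    let mut seen = vec![false; successors.len()];
    let mut out = Vec::new();
    visit(0, successors, &mut seen, &mut out);
    out
}

fn main() {
    let diamond = vec![vec![1, 2], vec![3], vec![3], vec![]];
    let post = postorder(&diamond);
    assert_eq!(*post.last().unwrap(), 0); // A comes last in a postorder...
    let rpo: Vec<usize> = post.into_iter().rev().collect();
    // ...so reverse postorder starts at A and, on a DAG, every block appears
    // before all of its successors, i.e. it is a topological order.
    assert_eq!(rpo[0], 0);
    assert_eq!(*rpo.last().unwrap(), 3);
}
```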
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index 06874741b..8d427fdb6 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -5,7 +5,7 @@ use rustc_ast::InlineAsmTemplatePiece;
use super::*;
use crate::ty;
-TrivialTypeTraversalAndLiftImpls! {
+TrivialTypeTraversalImpls! {
BlockTailInfo,
MirPhase,
SourceInfo,
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index 069b38591..f2745b32c 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -186,7 +186,7 @@ macro_rules! make_mir_visitor {
fn visit_constant(
&mut self,
- constant: & $($mutability)? Constant<'tcx>,
+ constant: & $($mutability)? ConstOperand<'tcx>,
location: Location,
) {
self.super_constant(constant, location);
@@ -469,8 +469,8 @@ macro_rules! make_mir_visitor {
self.visit_source_info(source_info);
match kind {
TerminatorKind::Goto { .. } |
- TerminatorKind::Resume |
- TerminatorKind::Terminate |
+ TerminatorKind::UnwindResume |
+ TerminatorKind::UnwindTerminate(_) |
TerminatorKind::GeneratorDrop |
TerminatorKind::Unreachable |
TerminatorKind::FalseEdge { .. } |
@@ -647,8 +647,8 @@ macro_rules! make_mir_visitor {
BorrowKind::Shared => PlaceContext::NonMutatingUse(
NonMutatingUseContext::SharedBorrow
),
- BorrowKind::Shallow => PlaceContext::NonMutatingUse(
- NonMutatingUseContext::ShallowBorrow
+ BorrowKind::Fake => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::FakeBorrow
),
BorrowKind::Mut { .. } =>
PlaceContext::MutatingUse(MutatingUseContext::Borrow),
@@ -838,12 +838,20 @@ macro_rules! make_mir_visitor {
let VarDebugInfo {
name: _,
source_info,
+ composite,
value,
argument_index: _,
} = var_debug_info;
self.visit_source_info(source_info);
let location = Location::START;
+ if let Some(box VarDebugInfoFragment { ref $($mutability)? ty, ref $($mutability)? projection }) = composite {
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ for elem in projection {
+ let ProjectionElem::Field(_, ty) = elem else { bug!() };
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ }
match value {
VarDebugInfoContents::Const(c) => self.visit_constant(c, location),
VarDebugInfoContents::Place(place) =>
@@ -852,17 +860,6 @@ macro_rules! make_mir_visitor {
PlaceContext::NonUse(NonUseContext::VarDebugInfo),
location
),
- VarDebugInfoContents::Composite { ty, fragments } => {
- // FIXME(eddyb) use a better `TyContext` here.
- self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
- for VarDebugInfoFragment { projection: _, contents } in fragments {
- self.visit_place(
- contents,
- PlaceContext::NonUse(NonUseContext::VarDebugInfo),
- location,
- );
- }
- }
}
}
@@ -873,20 +870,20 @@ macro_rules! make_mir_visitor {
fn super_constant(
&mut self,
- constant: & $($mutability)? Constant<'tcx>,
+ constant: & $($mutability)? ConstOperand<'tcx>,
location: Location
) {
- let Constant {
+ let ConstOperand {
span,
user_ty: _, // no visit method for this
- literal,
+ const_,
} = constant;
self.visit_span($(& $mutability)? *span);
- match literal {
- ConstantKind::Ty(ct) => self.visit_ty_const($(&$mutability)? *ct, location),
- ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
- ConstantKind::Unevaluated(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ match const_ {
+ Const::Ty(ct) => self.visit_ty_const($(&$mutability)? *ct, location),
+ Const::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ Const::Unevaluated(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
}
}
@@ -1112,6 +1109,11 @@ macro_rules! visit_place_fns {
self.visit_ty(&mut new_ty, TyContext::Location(location));
if ty != new_ty { Some(PlaceElem::OpaqueCast(new_ty)) } else { None }
}
+ PlaceElem::Subtype(ty) => {
+ let mut new_ty = ty;
+ self.visit_ty(&mut new_ty, TyContext::Location(location));
+ if ty != new_ty { Some(PlaceElem::Subtype(new_ty)) } else { None }
+ }
PlaceElem::Deref
| PlaceElem::ConstantIndex { .. }
| PlaceElem::Subslice { .. }
@@ -1178,7 +1180,9 @@ macro_rules! visit_place_fns {
location: Location,
) {
match elem {
- ProjectionElem::OpaqueCast(ty) | ProjectionElem::Field(_, ty) => {
+ ProjectionElem::OpaqueCast(ty)
+ | ProjectionElem::Subtype(ty)
+ | ProjectionElem::Field(_, ty) => {
self.visit_ty(ty, TyContext::Location(location));
}
ProjectionElem::Index(local) => {
@@ -1256,8 +1260,8 @@ pub enum NonMutatingUseContext {
Move,
/// Shared borrow.
SharedBorrow,
- /// Shallow borrow.
- ShallowBorrow,
+ /// A fake borrow.
+ FakeBorrow,
/// AddressOf for *const pointer.
AddressOf,
/// PlaceMention statement.
@@ -1336,7 +1340,7 @@ impl PlaceContext {
matches!(
self,
PlaceContext::NonMutatingUse(
- NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::ShallowBorrow
+ NonMutatingUseContext::SharedBorrow | NonMutatingUseContext::FakeBorrow
) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
)
}
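The new `PlaceElem::Subtype` arm follows the same folding idiom as the surrounding arms: visit a copy of the type and rebuild the element only if the visit changed it, returning `None` to mean "unchanged". A standalone sketch of that idiom with hypothetical stand-in types (not the real MIR visitor macro):

```rust
// Hypothetical stand-ins to show the "rebuild only if changed" folding idiom.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Ty { Int, Bool }

#[derive(Clone, Copy, PartialEq, Debug)]
enum PlaceElem { Subtype(Ty) }

fn fold_elem(elem: PlaceElem, visit_ty: impl Fn(&mut Ty)) -> Option<PlaceElem> {
    match elem {
        PlaceElem::Subtype(ty) => {
            let mut new_ty = ty;
            visit_ty(&mut new_ty);
            // `None` signals "unchanged", so callers can keep the original
            // element and avoid rebuilding the projection list.
            if ty != new_ty { Some(PlaceElem::Subtype(new_ty)) } else { None }
        }
    }
}

fn main() {
    assert_eq!(fold_elem(PlaceElem::Subtype(Ty::Int), |_| {}), None);
    assert_eq!(
        fold_elem(PlaceElem::Subtype(Ty::Int), |ty| *ty = Ty::Bool),
        Some(PlaceElem::Subtype(Ty::Bool))
    );
}
```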
diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs
index 348f79ed6..8ba3764bc 100644
--- a/compiler/rustc_middle/src/query/erase.rs
+++ b/compiler/rustc_middle/src/query/erase.rs
@@ -1,4 +1,5 @@
use crate::mir;
+use crate::query::CyclePlaceholder;
use crate::traits;
use crate::ty::{self, Ty};
use std::mem::{size_of, transmute_copy, MaybeUninit};
@@ -115,21 +116,16 @@ impl EraseType for Result<ty::Const<'_>, mir::interpret::LitToConstError> {
type Result = [u8; size_of::<Result<ty::Const<'static>, mir::interpret::LitToConstError>>()];
}
-impl EraseType for Result<mir::ConstantKind<'_>, mir::interpret::LitToConstError> {
- type Result =
- [u8; size_of::<Result<mir::ConstantKind<'static>, mir::interpret::LitToConstError>>()];
+impl EraseType for Result<mir::Const<'_>, mir::interpret::LitToConstError> {
+ type Result = [u8; size_of::<Result<mir::Const<'static>, mir::interpret::LitToConstError>>()];
}
-impl EraseType for Result<mir::interpret::ConstAlloc<'_>, mir::interpret::ErrorHandled> {
- type Result = [u8; size_of::<
- Result<mir::interpret::ConstAlloc<'static>, mir::interpret::ErrorHandled>,
- >()];
+impl EraseType for Result<mir::ConstAlloc<'_>, mir::interpret::ErrorHandled> {
+ type Result = [u8; size_of::<Result<mir::ConstAlloc<'static>, mir::interpret::ErrorHandled>>()];
}
-impl EraseType for Result<mir::interpret::ConstValue<'_>, mir::interpret::ErrorHandled> {
- type Result = [u8; size_of::<
- Result<mir::interpret::ConstValue<'static>, mir::interpret::ErrorHandled>,
- >()];
+impl EraseType for Result<mir::ConstValue<'_>, mir::interpret::ErrorHandled> {
+ type Result = [u8; size_of::<Result<mir::ConstValue<'static>, mir::interpret::ErrorHandled>>()];
}
impl EraseType for Result<Option<ty::ValTree<'_>>, mir::interpret::ErrorHandled> {
@@ -142,6 +138,10 @@ impl EraseType for Result<&'_ ty::List<Ty<'_>>, ty::util::AlwaysRequiresDrop> {
[u8; size_of::<Result<&'static ty::List<Ty<'static>>, ty::util::AlwaysRequiresDrop>>()];
}
+impl EraseType for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> {
+ type Result = [u8; size_of::<Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder>>()];
+}
+
impl<T> EraseType for Option<&'_ T> {
type Result = [u8; size_of::<Option<&'static ()>>()];
}
@@ -265,6 +265,7 @@ trivial! {
rustc_middle::ty::adjustment::CoerceUnsizedInfo,
rustc_middle::ty::AssocItem,
rustc_middle::ty::AssocItemContainer,
+ rustc_middle::ty::Asyncness,
rustc_middle::ty::BoundVariableKind,
rustc_middle::ty::DeducedParamAttrs,
rustc_middle::ty::Destructor,
@@ -310,10 +311,10 @@ macro_rules! tcx_lifetime {
tcx_lifetime! {
rustc_middle::hir::Owner,
rustc_middle::middle::exported_symbols::ExportedSymbol,
- rustc_middle::mir::ConstantKind,
+ rustc_middle::mir::Const,
rustc_middle::mir::DestructuredConstant,
- rustc_middle::mir::interpret::ConstAlloc,
- rustc_middle::mir::interpret::ConstValue,
+ rustc_middle::mir::ConstAlloc,
+ rustc_middle::mir::ConstValue,
rustc_middle::mir::interpret::GlobalId,
rustc_middle::mir::interpret::LitToConstInput,
rustc_middle::traits::query::MethodAutoderefStepsResult,
diff --git a/compiler/rustc_middle/src/query/keys.rs b/compiler/rustc_middle/src/query/keys.rs
index 01bdc4c99..b1f837968 100644
--- a/compiler/rustc_middle/src/query/keys.rs
+++ b/compiler/rustc_middle/src/query/keys.rs
@@ -2,7 +2,6 @@
use crate::infer::canonical::Canonical;
use crate::mir;
-use crate::mir::interpret::ConstValue;
use crate::traits;
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::{TyAndLayout, ValidityRequirement};
@@ -369,7 +368,7 @@ impl<'tcx> Key for (ty::Const<'tcx>, FieldIdx) {
}
}
-impl<'tcx> Key for (ConstValue<'tcx>, Ty<'tcx>) {
+impl<'tcx> Key for (mir::ConstValue<'tcx>, Ty<'tcx>) {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {
@@ -377,7 +376,7 @@ impl<'tcx> Key for (ConstValue<'tcx>, Ty<'tcx>) {
}
}
-impl<'tcx> Key for mir::interpret::ConstAlloc<'tcx> {
+impl<'tcx> Key for mir::ConstAlloc<'tcx> {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {
@@ -417,7 +416,7 @@ impl<'tcx> Key for GenericArg<'tcx> {
}
}
-impl<'tcx> Key for mir::ConstantKind<'tcx> {
+impl<'tcx> Key for mir::Const<'tcx> {
type CacheSelector = DefaultCacheSelector<Self>;
fn default_span(&self, _: TyCtxt<'_>) -> Span {
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 94ae0dcb5..340c5a769 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -7,7 +7,6 @@
#![allow(unused_parens)]
use crate::dep_graph;
-use crate::dep_graph::DepKind;
use crate::infer::canonical::{self, Canonical};
use crate::lint::LintExpectation;
use crate::metadata::ModChild;
@@ -21,12 +20,12 @@ use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
use crate::mir::interpret::GlobalId;
use crate::mir::interpret::{
- ConstValue, EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
+ EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
};
use crate::mir::interpret::{LitToConstError, LitToConstInput};
use crate::mir::mono::CodegenUnit;
use crate::query::erase::{erase, restore, Erase};
-use crate::query::plumbing::{query_ensure, query_get_at, DynamicQuery};
+use crate::query::plumbing::{query_ensure, query_get_at, CyclePlaceholder, DynamicQuery};
use crate::thir;
use crate::traits::query::{
CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
@@ -45,7 +44,6 @@ use crate::traits::{
use crate::ty::fast_reject::SimplifiedType;
use crate::ty::layout::ValidityRequirement;
use crate::ty::util::AlwaysRequiresDrop;
-use crate::ty::GeneratorDiagnosticData;
use crate::ty::TyCtxtFeed;
use crate::ty::{
self, print::describe_as_module, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt,
@@ -231,7 +229,7 @@ rustc_queries! {
action = {
use rustc_hir::def::DefKind;
match tcx.def_kind(key) {
- DefKind::TyAlias { .. } => "expanding type alias",
+ DefKind::TyAlias => "expanding type alias",
DefKind::TraitAlias => "expanding trait alias",
_ => "computing type of",
}
@@ -243,6 +241,24 @@ rustc_queries! {
feedable
}
+ /// Specialized instance of `type_of` that detects cycles that are due to
+ /// revealing an opaque type because of an auto trait bound. Unless `CyclePlaceholder` needs
+ /// to be handled separately, call `type_of` instead.
+ query type_of_opaque(key: DefId) -> Result<ty::EarlyBinder<Ty<'tcx>>, CyclePlaceholder> {
+ desc { |tcx|
+ "computing type of opaque `{path}`",
+ path = tcx.def_path_str(key),
+ }
+ }
+
+ query type_alias_is_lazy(key: DefId) -> bool {
+ desc { |tcx|
+ "computing whether `{path}` is a lazy type alias",
+ path = tcx.def_path_str(key),
+ }
+ separate_provide_extern
+ }
+
query collect_return_position_impl_trait_in_trait_tys(key: DefId)
-> Result<&'tcx FxHashMap<DefId, ty::EarlyBinder<Ty<'tcx>>>, ErrorGuaranteed>
{
@@ -721,7 +737,7 @@ rustc_queries! {
separate_provide_extern
}
- query asyncness(key: DefId) -> hir::IsAsync {
+ query asyncness(key: DefId) -> ty::Asyncness {
desc { |tcx| "checking if the function is async: `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
@@ -1081,7 +1097,7 @@ rustc_queries! {
}
/// Converts a type level constant value into `ConstValue`
- query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> ConstValue<'tcx> {
+ query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> mir::ConstValue<'tcx> {
desc { "converting type-level constant value to mir constant value"}
}
@@ -1091,17 +1107,7 @@ rustc_queries! {
desc { "destructuring type level constant"}
}
- /// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
- /// and its field values. This should only be used for pretty printing.
- query try_destructure_mir_constant_for_diagnostics(
- key: (ConstValue<'tcx>, Ty<'tcx>)
- ) -> Option<mir::DestructuredConstant<'tcx>> {
- desc { "destructuring MIR constant"}
- no_hash
- eval_always
- }
-
- query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
+ query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> mir::ConstValue<'tcx> {
desc { "getting a &core::panic::Location referring to a span" }
}
@@ -1130,6 +1136,7 @@ rustc_queries! {
query reachable_set(_: ()) -> &'tcx LocalDefIdSet {
arena_cache
desc { "reachability" }
+ cache_on_disk_if { true }
}
/// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
@@ -2149,12 +2156,6 @@ rustc_queries! {
desc { "computing the backend features for CLI flags" }
}
- query generator_diagnostic_data(key: DefId) -> &'tcx Option<GeneratorDiagnosticData<'tcx>> {
- arena_cache
- desc { |tcx| "looking up generator diagnostic data of `{}`", tcx.def_path_str(key) }
- separate_provide_extern
- }
-
query check_validity_requirement(key: (ValidityRequirement, ty::ParamEnvAnd<'tcx, Ty<'tcx>>)) -> Result<bool, &'tcx ty::layout::LayoutError<'tcx>> {
desc { "checking validity requirement for `{}`: {}", key.1.value, key.0 }
}
diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs
index 995b2140f..280f5d0a8 100644
--- a/compiler/rustc_middle/src/query/on_disk_cache.rs
+++ b/compiler/rustc_middle/src/query/on_disk_cache.rs
@@ -22,7 +22,7 @@ use rustc_span::hygiene::{
ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData,
};
use rustc_span::source_map::{SourceMap, StableSourceFileId};
-use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span};
+use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, RelativeBytePos, SourceFile, Span};
use rustc_span::{CachingSourceMapView, Symbol};
use std::collections::hash_map::Entry;
use std::io;
@@ -688,11 +688,12 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span {
let file_lo_index = SourceFileIndex::decode(decoder);
let line_lo = usize::decode(decoder);
- let col_lo = BytePos::decode(decoder);
+ let col_lo = RelativeBytePos::decode(decoder);
let len = BytePos::decode(decoder);
let file_lo = decoder.file_index_to_file(file_lo_index);
- let lo = file_lo.lines(|lines| lines[line_lo - 1] + col_lo);
+ let lo = file_lo.lines()[line_lo - 1] + col_lo;
+ let lo = file_lo.absolute_position(lo);
let hi = lo + len;
Span::new(lo, hi, ctxt, parent)
@@ -895,7 +896,7 @@ impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span {
}
if let Some(parent) = span_data.parent {
- let enclosing = s.tcx.source_span(parent).data_untracked();
+ let enclosing = s.tcx.source_span_untracked(parent).data_untracked();
if enclosing.contains(span_data) {
TAG_RELATIVE_SPAN.encode(s);
(span_data.lo - enclosing.lo).to_u32().encode(s);
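The decoding change above stores the low column as a `RelativeBytePos` and rebuilds the absolute position from the file's line-start table plus the file's own offset. A simplified numeric model of that arithmetic (toy types; the real `SourceFile` API differs):

```rust
// Toy model: positions are plain u32s, `line_starts` holds each line's start
// offset relative to the file, and `start_pos` is the file's offset overall.
struct SourceFile {
    start_pos: u32,
    line_starts: Vec<u32>,
}

impl SourceFile {
    fn absolute_position(&self, relative: u32) -> u32 {
        self.start_pos + relative
    }
}

fn main() {
    let file = SourceFile { start_pos: 1000, line_starts: vec![0, 10, 25] };
    let (line_lo, col_lo, len) = (2u32, 4u32, 3u32); // 1-based line, relative column, span length

    // Mirrors `lines()[line_lo - 1] + col_lo` followed by `absolute_position(lo)`.
    let lo_rel = file.line_starts[(line_lo - 1) as usize] + col_lo;
    let lo = file.absolute_position(lo_rel);
    let hi = lo + len;
    assert_eq!((lo, hi), (1014, 1017));
}
```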
diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs
index a1aac2846..34e5b02ba 100644
--- a/compiler/rustc_middle/src/query/plumbing.rs
+++ b/compiler/rustc_middle/src/query/plumbing.rs
@@ -19,7 +19,7 @@ use rustc_query_system::dep_graph::SerializedDepNodeIndex;
pub(crate) use rustc_query_system::query::QueryJobId;
use rustc_query_system::query::*;
use rustc_query_system::HandleCycleError;
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
use std::ops::Deref;
pub struct QueryKeyStringCache {
@@ -37,7 +37,7 @@ pub struct DynamicQuery<'tcx, C: QueryCache> {
pub eval_always: bool,
pub dep_kind: DepKind,
pub handle_cycle_error: HandleCycleError,
- pub query_state: FieldOffset<QueryStates<'tcx>, QueryState<C::Key, DepKind>>,
+ pub query_state: FieldOffset<QueryStates<'tcx>, QueryState<C::Key>>,
pub query_cache: FieldOffset<QueryCaches<'tcx>, C>,
pub cache_on_disk: fn(tcx: TyCtxt<'tcx>, key: &C::Key) -> bool,
pub execute_query: fn(tcx: TyCtxt<'tcx>, k: C::Key) -> C::Value,
@@ -52,7 +52,8 @@ pub struct DynamicQuery<'tcx, C: QueryCache> {
pub loadable_from_disk:
fn(tcx: TyCtxt<'tcx>, key: &C::Key, index: SerializedDepNodeIndex) -> bool,
pub hash_result: HashResult<C::Value>,
- pub value_from_cycle_error: fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> C::Value,
+ pub value_from_cycle_error:
+ fn(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> C::Value,
pub format_value: fn(&C::Value) -> String,
}
@@ -401,7 +402,7 @@ macro_rules! define_callbacks {
#[derive(Default)]
pub struct QueryStates<'tcx> {
$(
- pub $name: QueryState<$($K)*, DepKind>,
+ pub $name: QueryState<$($K)*>,
)*
}
@@ -515,7 +516,7 @@ macro_rules! define_feedable {
}
}
None => {
- let dep_node = dep_graph::DepNode::construct(tcx, dep_graph::DepKind::$name, &key);
+ let dep_node = dep_graph::DepNode::construct(tcx, dep_graph::dep_kinds::$name, &key);
let dep_node_index = tcx.dep_graph.with_feed_task(
dep_node,
tcx,
@@ -629,3 +630,6 @@ impl<'tcx> TyCtxtAt<'tcx> {
.unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id))
}
}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct CyclePlaceholder(pub ErrorGuaranteed);
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
index ebc1c1190..89934e435 100644
--- a/compiler/rustc_middle/src/thir.rs
+++ b/compiler/rustc_middle/src/thir.rs
@@ -563,11 +563,11 @@ pub enum InlineAsmOperand<'tcx> {
out_expr: Option<ExprId>,
},
Const {
- value: mir::ConstantKind<'tcx>,
+ value: mir::Const<'tcx>,
span: Span,
},
SymFn {
- value: mir::ConstantKind<'tcx>,
+ value: mir::Const<'tcx>,
span: Span,
},
SymStatic {
@@ -732,14 +732,18 @@ pub enum PatKind<'tcx> {
},
/// One of the following:
- /// * `&str`, which will be handled as a string pattern and thus exhaustiveness
- /// checking will detect if you use the same string twice in different patterns.
- /// * integer, bool, char or float, which will be handled by exhaustiveness to cover exactly
- /// its own value, similar to `&str`, but these values are much simpler.
- /// * Opaque constants, that must not be matched structurally. So anything that does not derive
- /// `PartialEq` and `Eq`.
+ /// * `&str` (represented as a valtree), which will be handled as a string pattern and thus
+ /// exhaustiveness checking will detect if you use the same string twice in different
+ /// patterns.
+ /// * integer, bool, char or float (represented as a valtree), which will be handled by
+ /// exhaustiveness to cover exactly its own value, similar to `&str`, but these values are
+ /// much simpler.
+ /// * Opaque constants (represented as `mir::ConstValue`), that must not be matched
+ /// structurally. So anything that does not derive `PartialEq` and `Eq`.
+ ///
+ /// These are always compared with the matched place using (the semantics of) `PartialEq`.
Constant {
- value: mir::ConstantKind<'tcx>,
+ value: mir::Const<'tcx>,
},
Range(Box<PatRange<'tcx>>),
@@ -769,8 +773,8 @@ pub enum PatKind<'tcx> {
#[derive(Clone, Debug, PartialEq, HashStable)]
pub struct PatRange<'tcx> {
- pub lo: mir::ConstantKind<'tcx>,
- pub hi: mir::ConstantKind<'tcx>,
+ pub lo: mir::Const<'tcx>,
+ pub hi: mir::Const<'tcx>,
pub end: RangeEnd,
}
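The expanded `PatKind::Constant` documentation distinguishes string, primitive, and opaque constants and notes that all constant patterns match via (the semantics of) `PartialEq`. A small surface-Rust illustration of the first two cases; this exercises the language behaviour, not the THIR representation:

```rust
const GREETING: &str = "hello";
const ANSWER: i32 = 42;

fn classify(s: &str, n: i32) -> &'static str {
    match (s, n) {
        // `&str` and integer constants in patterns compare with `PartialEq`.
        (GREETING, ANSWER) => "greeting and answer",
        (GREETING, _) => "greeting",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify("hello", 42), "greeting and answer");
    assert_eq!(classify("hello", 7), "greeting");
    assert_eq!(classify("bye", 42), "other");
}
```

The third bullet (opaque constants) has no direct counterpart here: such values are compared with `PartialEq` rather than matched structurally.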
diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs
index 681400dbb..b84e15688 100644
--- a/compiler/rustc_middle/src/thir/visit.rs
+++ b/compiler/rustc_middle/src/thir/visit.rs
@@ -26,13 +26,13 @@ pub trait Visitor<'a, 'tcx: 'a>: Sized {
walk_pat(self, pat);
}
- // Note: We don't have visitors for `ty::Const` and `mir::ConstantKind`
+ // Note: We don't have visitors for `ty::Const` and `mir::Const`
// (even though these types occur in THIR) for consistency and to reduce confusion,
// since the lazy creation of constants during thir construction causes most
- // 'constants' to not be of type `ty::Const` or `mir::ConstantKind` at that
+ // 'constants' to not be of type `ty::Const` or `mir::Const` at that
// stage (they are mostly still identified by `DefId` or `hir::Lit`, see
// the variants `Literal`, `NonHirLiteral` and `NamedConst` in `thir::ExprKind`).
- // You have to manually visit `ty::Const` and `mir::ConstantKind` through the
+ // You have to manually visit `ty::Const` and `mir::Const` through the
// other `visit*` functions.
}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 3465759b9..99b750c9a 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -13,7 +13,7 @@ use crate::infer::canonical::Canonical;
use crate::mir::ConstraintCategory;
use crate::ty::abstract_const::NotConstEvaluatable;
use crate::ty::GenericArgsRef;
-use crate::ty::{self, AdtKind, Ty, TyCtxt};
+use crate::ty::{self, AdtKind, Ty};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{Applicability, Diagnostic};
@@ -86,7 +86,7 @@ pub enum Reveal {
///
/// We do not want to intern this as there are a lot of obligation causes which
/// only live for a short period of time.
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub struct ObligationCause<'tcx> {
pub span: Span,
@@ -194,7 +194,7 @@ impl<'tcx> ObligationCause<'tcx> {
}
}
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub struct UnifyReceiverContext<'tcx> {
pub assoc_item: ty::AssocItem,
@@ -202,7 +202,7 @@ pub struct UnifyReceiverContext<'tcx> {
pub args: GenericArgsRef<'tcx>,
}
-#[derive(Clone, PartialEq, Eq, Lift, Default, HashStable)]
+#[derive(Clone, PartialEq, Eq, Default, HashStable)]
#[derive(TypeVisitable, TypeFoldable, TyEncodable, TyDecodable)]
pub struct InternedObligationCauseCode<'tcx> {
/// `None` for `ObligationCauseCode::MiscObligation` (a common case, occurs ~60% of
@@ -238,7 +238,7 @@ impl<'tcx> std::ops::Deref for InternedObligationCauseCode<'tcx> {
}
}
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub enum ObligationCauseCode<'tcx> {
/// Not well classified or should be obvious from the span.
@@ -299,6 +299,10 @@ pub enum ObligationCauseCode<'tcx> {
SizedYieldType,
/// Inline asm operand type must be `Sized`.
InlineAsmSized,
+ /// Captured closure type must be `Sized`.
+ SizedClosureCapture(LocalDefId),
+ /// Types live across generator yields must be `Sized`.
+ SizedGeneratorInterior(LocalDefId),
/// `[expr; N]` requires `type_of(expr): Copy`.
RepeatElementCopy {
/// If element is a `const fn` we display a help message suggesting to move the
@@ -378,6 +382,9 @@ pub enum ObligationCauseCode<'tcx> {
/// `start` has wrong type
StartFunctionType,
+ /// Language function has wrong type
+ LangFunctionType(Symbol),
+
/// Intrinsic has wrong type
IntrinsicType,
@@ -470,7 +477,7 @@ pub enum WellFormedLoc {
},
}
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub struct ImplDerivedObligationCause<'tcx> {
pub derived: DerivedObligationCause<'tcx>,
@@ -524,14 +531,7 @@ pub enum StatementAsExpression {
NeedsBoxing,
}
-impl<'tcx> ty::Lift<'tcx> for StatementAsExpression {
- type Lifted = StatementAsExpression;
- fn lift_to_tcx(self, _tcx: TyCtxt<'tcx>) -> Option<StatementAsExpression> {
- Some(self)
- }
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub struct MatchExpressionArmCause<'tcx> {
pub arm_block_id: Option<hir::HirId>,
@@ -547,7 +547,7 @@ pub struct MatchExpressionArmCause<'tcx> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-#[derive(Lift, TypeFoldable, TypeVisitable, HashStable, TyEncodable, TyDecodable)]
+#[derive(TypeFoldable, TypeVisitable, HashStable, TyEncodable, TyDecodable)]
pub struct IfExpressionCause<'tcx> {
pub then_id: hir::HirId,
pub else_id: hir::HirId,
@@ -557,7 +557,7 @@ pub struct IfExpressionCause<'tcx> {
pub opt_suggest_box_span: Option<Span>,
}
-#[derive(Clone, Debug, PartialEq, Eq, Lift, HashStable, TyEncodable, TyDecodable)]
+#[derive(Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeVisitable, TypeFoldable)]
pub struct DerivedObligationCause<'tcx> {
/// The trait predicate of the parent obligation that led to the
@@ -570,7 +570,7 @@ pub struct DerivedObligationCause<'tcx> {
pub parent_code: InternedObligationCauseCode<'tcx>,
}
-#[derive(Clone, Debug, TypeVisitable, Lift)]
+#[derive(Clone, Debug, TypeVisitable)]
pub enum SelectionError<'tcx> {
/// The trait is not implemented.
Unimplemented,
@@ -593,7 +593,7 @@ pub enum SelectionError<'tcx> {
OpaqueTypeAutoTraitLeakageUnknown(DefId),
}
-#[derive(Clone, Debug, TypeVisitable, Lift)]
+#[derive(Clone, Debug, TypeVisitable)]
pub struct SelectionOutputTypeParameterMismatch<'tcx> {
pub found_trait_ref: ty::PolyTraitRef<'tcx>,
pub expected_trait_ref: ty::PolyTraitRef<'tcx>,
@@ -638,7 +638,7 @@ pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// ### The type parameter `N`
///
/// See explanation on `ImplSourceUserDefinedData`.
-#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub enum ImplSource<'tcx, N> {
/// ImplSource identifying a particular impl.
@@ -704,7 +704,7 @@ impl<'tcx, N> ImplSource<'tcx, N> {
/// is `Obligation`, as one might expect. During codegen, however, this
/// is `()`, because codegen only requires a shallow resolution of an
/// impl, and nested obligations are satisfied later.
-#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
#[derive(TypeFoldable, TypeVisitable)]
pub struct ImplSourceUserDefinedData<'tcx, N> {
pub impl_def_id: DefId,
@@ -736,7 +736,7 @@ pub enum BuiltinImplSource {
TupleUnsizing,
}
-TrivialTypeTraversalAndLiftImpls! { BuiltinImplSource }
+TrivialTypeTraversalImpls! { BuiltinImplSource }
#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
pub enum ObjectSafetyViolation {
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
index 950a59e96..975e3e3ac 100644
--- a/compiler/rustc_middle/src/traits/query.rs
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -17,8 +17,7 @@ pub mod type_op {
use crate::ty::{Predicate, Ty, TyCtxt, UserType};
use std::fmt;
- #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
- #[derive(TypeFoldable, TypeVisitable)]
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, TypeVisitable)]
pub struct AscribeUserType<'tcx> {
pub mir_ty: Ty<'tcx>,
pub user_ty: UserType<'tcx>,
@@ -30,22 +29,19 @@ pub mod type_op {
}
}
- #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
- #[derive(TypeFoldable, TypeVisitable)]
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, TypeVisitable)]
pub struct Eq<'tcx> {
pub a: Ty<'tcx>,
pub b: Ty<'tcx>,
}
- #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
- #[derive(TypeFoldable, TypeVisitable)]
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, TypeVisitable)]
pub struct Subtype<'tcx> {
pub sub: Ty<'tcx>,
pub sup: Ty<'tcx>,
}
- #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
- #[derive(TypeFoldable, TypeVisitable)]
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, TypeVisitable)]
pub struct ProvePredicate<'tcx> {
pub predicate: Predicate<'tcx>,
}
@@ -56,8 +52,7 @@ pub mod type_op {
}
}
- #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
- #[derive(TypeFoldable, TypeVisitable)]
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, TypeFoldable, TypeVisitable)]
pub struct Normalize<T> {
pub value: T,
}
@@ -101,7 +96,7 @@ impl<'tcx> From<TypeError<'tcx>> for NoSolution {
}
}
-#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable)]
pub struct DropckOutlivesResult<'tcx> {
pub kinds: Vec<GenericArg<'tcx>>,
pub overflows: Vec<Ty<'tcx>>,
@@ -194,7 +189,7 @@ pub struct MethodAutoderefBadTy<'tcx> {
}
/// Result from the `normalize_projection_ty` query.
-#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct NormalizationResult<'tcx> {
/// Result of normalization.
pub normalized_ty: Ty<'tcx>,
@@ -207,7 +202,7 @@ pub struct NormalizationResult<'tcx> {
/// case they are called implied bounds). They are fed to the
/// `OutlivesEnv` which in turn is supplied to the region checker and
/// other parts of the inference system.
-#[derive(Clone, Debug, TypeFoldable, TypeVisitable, Lift, HashStable)]
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable, HashStable)]
pub enum OutlivesBound<'tcx> {
RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index ffae35798..90bc5dd8f 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -305,7 +305,7 @@ impl From<ErrorGuaranteed> for OverflowError {
}
}
-TrivialTypeTraversalAndLiftImpls! { OverflowError }
+TrivialTypeTraversalImpls! { OverflowError }
impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
diff --git a/compiler/rustc_middle/src/traits/solve.rs b/compiler/rustc_middle/src/traits/solve.rs
index 9d63d2918..27a1e64a7 100644
--- a/compiler/rustc_middle/src/traits/solve.rs
+++ b/compiler/rustc_middle/src/traits/solve.rs
@@ -9,6 +9,9 @@ use crate::ty::{
self, FallibleTypeFolder, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeVisitable,
TypeVisitor,
};
+use rustc_span::def_id::DefId;
+
+use super::BuiltinImplSource;
mod cache;
pub mod inspect;
@@ -235,3 +238,63 @@ pub enum IsNormalizesToHack {
Yes,
No,
}
+
+/// Possible ways the given goal can be proven.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum CandidateSource {
+ /// A user written impl.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// fn main() {
+ /// let x: Vec<u32> = Vec::new();
+ /// // This uses the impl from the standard library to prove `Vec<T>: Clone`.
+ /// let y = x.clone();
+ /// }
+ /// ```
+ Impl(DefId),
+ /// A builtin impl generated by the compiler. When adding a new special
+ /// trait, try to use actual impls whenever possible. Builtin impls should
+ /// only be used in cases where the impl cannot be written manually.
+ ///
+ /// Notable examples are auto traits, `Sized`, and `DiscriminantKind`.
+ /// For a list of all traits with builtin impls, check out the
+ /// `EvalCtxt::assemble_builtin_impl_candidates` method.
+ BuiltinImpl(BuiltinImplSource),
+ /// An assumption from the environment.
+ ///
+ /// More precisely we've used the `n-th` assumption in the `param_env`.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// fn is_clone<T: Clone>(x: T) -> (T, T) {
+ /// // This uses the assumption `T: Clone` from the `where`-bounds
+ /// // to prove `T: Clone`.
+ /// (x.clone(), x)
+ /// }
+ /// ```
+ ParamEnv(usize),
+ /// If the self type is an alias type, e.g. an opaque type or a projection,
+ /// we know the bounds on that alias to hold even without knowing its concrete
+ /// underlying type.
+ ///
+ /// More precisely this candidate is using the `n-th` bound in the `item_bounds` of
+ /// the self type.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// trait Trait {
+ /// type Assoc: Clone;
+ /// }
+ ///
+ /// fn foo<T: Trait>(x: <T as Trait>::Assoc) {
+ ///     // We prove `<T as Trait>::Assoc: Clone` by looking at the bounds on `Assoc`
+ ///     // in the trait definition.
+ /// let _y = x.clone();
+ /// }
+ /// ```
+ AliasBound,
+}
diff --git a/compiler/rustc_middle/src/traits/solve/inspect.rs b/compiler/rustc_middle/src/traits/solve/inspect.rs
index 4e2af3816..e7e40bee6 100644
--- a/compiler/rustc_middle/src/traits/solve/inspect.rs
+++ b/compiler/rustc_middle/src/traits/solve/inspect.rs
@@ -1,32 +1,83 @@
+//! Data structure used to inspect trait solver behavior.
+//!
+//! During trait solving we optionally build "proof trees", the root of
+//! which is a [GoalEvaluation] with [GoalEvaluationKind::Root]. These
+//! trees are used to improve the debug experience and are also used by
+//! the compiler itself to provide necessary context for error messages.
+//!
+//! Because each nested goal in the solver gets [canonicalized] separately
+//! and we discard inference progress via "probes", we cannot mechanically
+//! use proof trees without somehow "lifting up" data local to the current
+//! `InferCtxt`. Any data used mechanically is therefore canonicalized and
+//! stored as [CanonicalState]. As printing canonicalized data worsens the
+//! debugging dumps, we do not simply canonicalize everything.
+//!
+//! This means proof trees contain inference variables and placeholders
+//! local to a different `InferCtxt` which must not be used with the
+//! current one.
+//!
+//! [canonicalized]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
+
use super::{
- CanonicalInput, Certainty, Goal, IsNormalizesToHack, NoSolution, QueryInput, QueryResult,
+ CandidateSource, Canonical, CanonicalInput, Certainty, Goal, IsNormalizesToHack, NoSolution,
+ QueryInput, QueryResult,
};
-use crate::ty;
+use crate::{infer::canonical::CanonicalVarValues, ty};
use format::ProofTreeFormatter;
use std::fmt::{Debug, Write};
mod format;
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
+/// Some `data` together with information about how they relate to the input
+/// of the canonical query.
+///
+/// This is only ever used as [CanonicalState]. Any type information in proof
+/// trees used mechanically has to be canonicalized as we otherwise leak
+/// inference variables from a nested `InferCtxt`.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, TypeFoldable, TypeVisitable)]
+pub struct State<'tcx, T> {
+ pub var_values: CanonicalVarValues<'tcx>,
+ pub data: T,
+}
+
+pub type CanonicalState<'tcx, T> = Canonical<'tcx, State<'tcx, T>>;
+
+#[derive(Debug, Eq, PartialEq)]
pub enum CacheHit {
Provisional,
Global,
}
-#[derive(Eq, PartialEq, Hash, HashStable)]
+/// When evaluating the root goals we also store the
+/// original values for the `CanonicalVarValues` of the
+/// canonicalized goal. We use this to map any [CanonicalState]
+/// from the local `InferCtxt` of the solver query to
+/// the `InferCtxt` of the caller.
+#[derive(Eq, PartialEq)]
+pub enum GoalEvaluationKind<'tcx> {
+ Root { orig_values: Vec<ty::GenericArg<'tcx>> },
+ Nested { is_normalizes_to_hack: IsNormalizesToHack },
+}
+
+#[derive(Eq, PartialEq)]
pub struct GoalEvaluation<'tcx> {
pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
- pub canonicalized_goal: CanonicalInput<'tcx>,
-
pub kind: GoalEvaluationKind<'tcx>,
- pub is_normalizes_to_hack: IsNormalizesToHack,
+ pub evaluation: CanonicalGoalEvaluation<'tcx>,
+ /// The nested goals from instantiating the query response.
pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
+}
+#[derive(Eq, PartialEq)]
+pub struct CanonicalGoalEvaluation<'tcx> {
+ pub goal: CanonicalInput<'tcx>,
+ pub kind: CanonicalGoalEvaluationKind<'tcx>,
pub result: QueryResult<'tcx>,
}
-#[derive(Eq, PartialEq, Hash, HashStable)]
-pub enum GoalEvaluationKind<'tcx> {
+#[derive(Eq, PartialEq)]
+pub enum CanonicalGoalEvaluationKind<'tcx> {
+ Overflow,
CacheHit(CacheHit),
Uncached { revisions: Vec<GoalEvaluationStep<'tcx>> },
}
@@ -36,55 +87,69 @@ impl Debug for GoalEvaluation<'_> {
}
}
-#[derive(Eq, PartialEq, Hash, HashStable)]
+#[derive(Eq, PartialEq)]
pub struct AddedGoalsEvaluation<'tcx> {
pub evaluations: Vec<Vec<GoalEvaluation<'tcx>>>,
pub result: Result<Certainty, NoSolution>,
}
-impl Debug for AddedGoalsEvaluation<'_> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter::new(f).format_nested_goal_evaluation(self)
- }
-}
-#[derive(Eq, PartialEq, Hash, HashStable)]
+#[derive(Eq, PartialEq)]
pub struct GoalEvaluationStep<'tcx> {
pub instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
- pub nested_goal_evaluations: Vec<AddedGoalsEvaluation<'tcx>>,
- pub candidates: Vec<GoalCandidate<'tcx>>,
+ /// The actual evaluation of the goal, always `ProbeKind::Root`.
+ pub evaluation: Probe<'tcx>,
+}
- pub result: QueryResult<'tcx>,
+/// A self-contained computation during trait solving. This either
+/// corresponds to an `EvalCtxt::probe(_X)` call or the root evaluation
+/// of a goal.
+#[derive(Eq, PartialEq)]
+pub struct Probe<'tcx> {
+ /// What happened inside of this probe in chronological order.
+ pub steps: Vec<ProbeStep<'tcx>>,
+ pub kind: ProbeKind<'tcx>,
}
-impl Debug for GoalEvaluationStep<'_> {
+
+impl Debug for Probe<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter::new(f).format_evaluation_step(self)
+ ProofTreeFormatter::new(f).format_probe(self)
}
}
-#[derive(Eq, PartialEq, Hash, HashStable)]
-pub struct GoalCandidate<'tcx> {
- pub nested_goal_evaluations: Vec<AddedGoalsEvaluation<'tcx>>,
- pub candidates: Vec<GoalCandidate<'tcx>>,
- pub kind: CandidateKind<'tcx>,
+#[derive(Eq, PartialEq)]
+pub enum ProbeStep<'tcx> {
+ /// We added a goal to the `EvalCtxt` which will get proven
+ /// the next time `EvalCtxt::try_evaluate_added_goals` is called.
+ AddGoal(CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>),
+ /// The inside of a `EvalCtxt::try_evaluate_added_goals` call.
+ EvaluateGoals(AddedGoalsEvaluation<'tcx>),
+ /// A call to `probe` while proving the current goal. This is
+ /// used whenever there are multiple candidates to prove the
+ /// current goal.
+ NestedProbe(Probe<'tcx>),
}
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
-pub enum CandidateKind<'tcx> {
+/// What kind of probe we're in. In case the probe represents a candidate, or
+/// the final result of the current goal - via [ProbeKind::Root] - we also
+/// store the [QueryResult].
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum ProbeKind<'tcx> {
+ /// The root inference context while proving a goal.
+ Root { result: QueryResult<'tcx> },
/// Probe entered when normalizing the self ty during candidate assembly
NormalizedSelfTyAssembly,
- /// A normal candidate for proving a goal
- Candidate { name: String, result: QueryResult<'tcx> },
+ /// Some candidate to prove the current goal.
+ ///
+ /// FIXME: Remove this in favor of always using more strongly typed variants.
+ MiscCandidate { name: &'static str, result: QueryResult<'tcx> },
+ /// A candidate for proving a trait or alias-relate goal.
+ TraitCandidate { source: CandidateSource, result: QueryResult<'tcx> },
/// Used in the probe that wraps normalizing the non-self type for the unsize
/// trait, which is also structurally matched on.
UnsizeAssembly,
/// During upcasting from some source object to target object type, used to
/// do a probe to find out what projection type(s) may be used to prove that
/// the source type upholds all of the target type's object bounds.
- UpcastProbe,
-}
-impl Debug for GoalCandidate<'_> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- ProofTreeFormatter::new(f).format_candidate(self)
- }
+ UpcastProjectionCompatibility,
}
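These probe trees are rendered by `ProofTreeFormatter` in the next file, which prints each nested probe one indentation level deeper via `nested`. A minimal standalone sketch of that kind of recursive, indented rendering with toy types (not the solver's):

```rust
use std::fmt::Write;

// Toy analogue of a probe tree: a label plus nested child probes.
struct Probe {
    kind: &'static str,
    steps: Vec<Probe>,
}

fn format_probe(out: &mut String, probe: &Probe, depth: usize) -> std::fmt::Result {
    writeln!(out, "{}{}", "    ".repeat(depth), probe.kind)?;
    for step in &probe.steps {
        // Children are formatted one indentation level deeper, like
        // `ProofTreeFormatter::nested`.
        format_probe(out, step, depth + 1)?;
    }
    Ok(())
}

fn main() {
    let tree = Probe {
        kind: "ROOT RESULT",
        steps: vec![
            Probe { kind: "CANDIDATE Impl", steps: vec![] },
            Probe {
                kind: "CANDIDATE ParamEnv(0)",
                steps: vec![Probe { kind: "ADDED GOAL", steps: vec![] }],
            },
        ],
    };
    let mut out = String::new();
    format_probe(&mut out, &tree, 0).unwrap();
    print!("{out}");
}
```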
diff --git a/compiler/rustc_middle/src/traits/solve/inspect/format.rs b/compiler/rustc_middle/src/traits/solve/inspect/format.rs
index 8759fecb0..5733be00a 100644
--- a/compiler/rustc_middle/src/traits/solve/inspect/format.rs
+++ b/compiler/rustc_middle/src/traits/solve/inspect/format.rs
@@ -39,44 +39,55 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
func(&mut ProofTreeFormatter { f: &mut Indentor { f: self.f, on_newline: true } })
}
- pub(super) fn format_goal_evaluation(&mut self, goal: &GoalEvaluation<'_>) -> std::fmt::Result {
- let goal_text = match goal.is_normalizes_to_hack {
- IsNormalizesToHack::Yes => "NORMALIZES-TO HACK GOAL",
- IsNormalizesToHack::No => "GOAL",
+ pub(super) fn format_goal_evaluation(&mut self, eval: &GoalEvaluation<'_>) -> std::fmt::Result {
+ let goal_text = match eval.kind {
+ GoalEvaluationKind::Root { orig_values: _ } => "ROOT GOAL",
+ GoalEvaluationKind::Nested { is_normalizes_to_hack } => match is_normalizes_to_hack {
+ IsNormalizesToHack::No => "GOAL",
+ IsNormalizesToHack::Yes => "NORMALIZES-TO HACK GOAL",
+ },
};
-
- writeln!(self.f, "{}: {:?}", goal_text, goal.uncanonicalized_goal)?;
- writeln!(self.f, "CANONICALIZED: {:?}", goal.canonicalized_goal)?;
-
- match &goal.kind {
- GoalEvaluationKind::CacheHit(CacheHit::Global) => {
- writeln!(self.f, "GLOBAL CACHE HIT: {:?}", goal.result)
- }
- GoalEvaluationKind::CacheHit(CacheHit::Provisional) => {
- writeln!(self.f, "PROVISIONAL CACHE HIT: {:?}", goal.result)
- }
- GoalEvaluationKind::Uncached { revisions } => {
- for (n, step) in revisions.iter().enumerate() {
- writeln!(self.f, "REVISION {n}: {:?}", step.result)?;
- self.nested(|this| this.format_evaluation_step(step))?;
- }
- writeln!(self.f, "RESULT: {:?}", goal.result)
- }
- }?;
-
- if goal.returned_goals.len() > 0 {
+ writeln!(self.f, "{}: {:?}", goal_text, eval.uncanonicalized_goal)?;
+ self.nested(|this| this.format_canonical_goal_evaluation(&eval.evaluation))?;
+ if eval.returned_goals.len() > 0 {
writeln!(self.f, "NESTED GOALS ADDED TO CALLER: [")?;
self.nested(|this| {
- for goal in goal.returned_goals.iter() {
+ for goal in eval.returned_goals.iter() {
writeln!(this.f, "ADDED GOAL: {goal:?},")?;
}
Ok(())
})?;
- writeln!(self.f, "]")?;
+ writeln!(self.f, "]")
+ } else {
+ Ok(())
}
+ }
- Ok(())
+ pub(super) fn format_canonical_goal_evaluation(
+ &mut self,
+ eval: &CanonicalGoalEvaluation<'_>,
+ ) -> std::fmt::Result {
+ writeln!(self.f, "GOAL: {:?}", eval.goal)?;
+
+ match &eval.kind {
+ CanonicalGoalEvaluationKind::Overflow => {
+ writeln!(self.f, "OVERFLOW: {:?}", eval.result)
+ }
+ CanonicalGoalEvaluationKind::CacheHit(CacheHit::Global) => {
+ writeln!(self.f, "GLOBAL CACHE HIT: {:?}", eval.result)
+ }
+ CanonicalGoalEvaluationKind::CacheHit(CacheHit::Provisional) => {
+ writeln!(self.f, "PROVISIONAL CACHE HIT: {:?}", eval.result)
+ }
+ CanonicalGoalEvaluationKind::Uncached { revisions } => {
+ for (n, step) in revisions.iter().enumerate() {
+ writeln!(self.f, "REVISION {n}")?;
+ self.nested(|this| this.format_evaluation_step(step))?;
+ }
+ writeln!(self.f, "RESULT: {:?}", eval.result)
+ }
+ }
}
pub(super) fn format_evaluation_step(
@@ -84,54 +95,53 @@ impl<'a, 'b> ProofTreeFormatter<'a, 'b> {
evaluation_step: &GoalEvaluationStep<'_>,
) -> std::fmt::Result {
writeln!(self.f, "INSTANTIATED: {:?}", evaluation_step.instantiated_goal)?;
-
- for candidate in &evaluation_step.candidates {
- self.nested(|this| this.format_candidate(candidate))?;
- }
- for nested in &evaluation_step.nested_goal_evaluations {
- self.nested(|this| this.format_nested_goal_evaluation(nested))?;
- }
-
- Ok(())
+ self.format_probe(&evaluation_step.evaluation)
}
- pub(super) fn format_candidate(&mut self, candidate: &GoalCandidate<'_>) -> std::fmt::Result {
- match &candidate.kind {
- CandidateKind::NormalizedSelfTyAssembly => {
+ pub(super) fn format_probe(&mut self, probe: &Probe<'_>) -> std::fmt::Result {
+ match &probe.kind {
+ ProbeKind::Root { result } => {
+ writeln!(self.f, "ROOT RESULT: {result:?}")
+ }
+ ProbeKind::NormalizedSelfTyAssembly => {
writeln!(self.f, "NORMALIZING SELF TY FOR ASSEMBLY:")
}
- CandidateKind::UnsizeAssembly => {
+ ProbeKind::UnsizeAssembly => {
writeln!(self.f, "ASSEMBLING CANDIDATES FOR UNSIZING:")
}
- CandidateKind::UpcastProbe => {
+ ProbeKind::UpcastProjectionCompatibility => {
writeln!(self.f, "PROBING FOR PROJECTION COMPATIBILITY FOR UPCASTING:")
}
- CandidateKind::Candidate { name, result } => {
+ ProbeKind::MiscCandidate { name, result } => {
writeln!(self.f, "CANDIDATE {name}: {result:?}")
}
+ ProbeKind::TraitCandidate { source, result } => {
+ writeln!(self.f, "CANDIDATE {source:?}: {result:?}")
+ }
}?;
self.nested(|this| {
- for candidate in &candidate.candidates {
- this.format_candidate(candidate)?;
- }
- for nested in &candidate.nested_goal_evaluations {
- this.format_nested_goal_evaluation(nested)?;
+ for step in &probe.steps {
+ match step {
+ ProbeStep::AddGoal(goal) => writeln!(this.f, "ADDED GOAL: {goal:?}")?,
+ ProbeStep::EvaluateGoals(eval) => this.format_added_goals_evaluation(eval)?,
+ ProbeStep::NestedProbe(probe) => this.format_probe(probe)?,
+ }
}
Ok(())
})
}
- pub(super) fn format_nested_goal_evaluation(
+ pub(super) fn format_added_goals_evaluation(
&mut self,
- nested_goal_evaluation: &AddedGoalsEvaluation<'_>,
+ added_goals_evaluation: &AddedGoalsEvaluation<'_>,
) -> std::fmt::Result {
- writeln!(self.f, "TRY_EVALUATE_ADDED_GOALS: {:?}", nested_goal_evaluation.result)?;
+ writeln!(self.f, "TRY_EVALUATE_ADDED_GOALS: {:?}", added_goals_evaluation.result)?;
- for (n, revision) in nested_goal_evaluation.evaluations.iter().enumerate() {
- writeln!(self.f, "REVISION {n}")?;
+ for (n, iterations) in added_goals_evaluation.evaluations.iter().enumerate() {
+ writeln!(self.f, "ITERATION {n}")?;
self.nested(|this| {
- for goal_evaluation in revision {
+ for goal_evaluation in iterations {
this.format_goal_evaluation(goal_evaluation)?;
}
Ok(())
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
index cdd835149..570f896ba 100644
--- a/compiler/rustc_middle/src/ty/abstract_const.rs
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -27,7 +27,7 @@ impl From<ErrorGuaranteed> for NotConstEvaluatable {
}
}
-TrivialTypeTraversalAndLiftImpls! { NotConstEvaluatable }
+TrivialTypeTraversalImpls! { NotConstEvaluatable }
pub type BoundAbstractConst<'tcx> = Result<Option<EarlyBinder<ty::Const<'tcx>>>, ErrorGuaranteed>;
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index 76931ceaa..c3e8991c6 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -76,7 +76,7 @@ pub enum PointerCoercion {
/// At some point, of course, `Box` should move out of the compiler, in which
/// case this is analogous to transforming a struct. E.g., `Box<[i32; 4]>` ->
/// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`.
-#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct Adjustment<'tcx> {
pub kind: Adjust<'tcx>,
pub target: Ty<'tcx>,
@@ -88,7 +88,7 @@ impl<'tcx> Adjustment<'tcx> {
}
}
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub enum Adjust<'tcx> {
/// Go from ! to any type.
NeverToAny,
@@ -110,7 +110,7 @@ pub enum Adjust<'tcx> {
/// The target type is `U` in both cases, with the region and mutability
/// being those shared by both the receiver and the returned reference.
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-#[derive(TypeFoldable, TypeVisitable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
pub struct OverloadedDeref<'tcx> {
pub region: ty::Region<'tcx>,
pub mutbl: hir::Mutability,
@@ -182,7 +182,7 @@ impl From<AutoBorrowMutability> for hir::Mutability {
}
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
-#[derive(TypeFoldable, TypeVisitable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
pub enum AutoBorrow<'tcx> {
/// Converts from T to &T.
Ref(ty::Region<'tcx>, AutoBorrowMutability),
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index b4c6e0d97..f50969dd9 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -448,7 +448,7 @@ impl<'tcx> AdtDef<'tcx> {
Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
Res::Def(DefKind::Struct, _)
| Res::Def(DefKind::Union, _)
- | Res::Def(DefKind::TyAlias { .. }, _)
+ | Res::Def(DefKind::TyAlias, _)
| Res::Def(DefKind::AssocTy, _)
| Res::SelfTyParam { .. }
| Res::SelfTyAlias { .. }
@@ -478,8 +478,8 @@ impl<'tcx> AdtDef<'tcx> {
}
Err(err) => {
let msg = match err {
- ErrorHandled::Reported(_) => "enum discriminant evaluation failed",
- ErrorHandled::TooGeneric => "enum discriminant depends on generics",
+ ErrorHandled::Reported(..) => "enum discriminant evaluation failed",
+ ErrorHandled::TooGeneric(..) => "enum discriminant depends on generics",
};
tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
None
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
index 2fec8ac90..af594bc5f 100644
--- a/compiler/rustc_middle/src/ty/binding.rs
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -6,7 +6,7 @@ pub enum BindingMode {
BindByValue(Mutability),
}
-TrivialTypeTraversalAndLiftImpls! { BindingMode }
+TrivialTypeTraversalImpls! { BindingMode }
impl BindingMode {
pub fn convert(BindingAnnotation(by_ref, mutbl): BindingAnnotation) -> BindingMode {
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 7c05deae9..dff7ff8c6 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -566,6 +566,5 @@ impl_binder_encode_decode! {
ty::TraitPredicate<'tcx>,
ty::ExistentialPredicate<'tcx>,
ty::TraitRef<'tcx>,
- Vec<ty::GeneratorInteriorTypeCause<'tcx>>,
ty::ExistentialTraitRef<'tcx>,
}
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index cce10417e..2518f0cf2 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -1,5 +1,5 @@
use crate::middle::resolve_bound_vars as rbv;
-use crate::mir::interpret::{AllocId, ConstValue, LitToConstInput, Scalar};
+use crate::mir::interpret::{AllocId, ErrorHandled, LitToConstInput, Scalar};
use crate::ty::{self, GenericArgs, ParamEnv, ParamEnvAnd, Ty, TyCtxt, TypeVisitableExt};
use rustc_data_structures::intern::Interned;
use rustc_error_messages::MultiSpan;
@@ -14,9 +14,8 @@ mod valtree;
pub use int::*;
pub use kind::*;
-use rustc_span::ErrorGuaranteed;
+use rustc_span::Span;
use rustc_span::DUMMY_SP;
-use rustc_target::abi::Size;
pub use valtree::*;
use super::sty::ConstKind;
@@ -36,16 +35,6 @@ pub struct ConstData<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstData<'_>, 40);
-enum EvalMode {
- Typeck,
- Mir,
-}
-
-enum EvalResult<'tcx> {
- ValTree(ty::ValTree<'tcx>),
- ConstVal(ConstValue<'tcx>),
-}
-
impl<'tcx> Const<'tcx> {
#[inline]
pub fn ty(self) -> Ty<'tcx> {
@@ -165,7 +154,7 @@ impl<'tcx> Const<'tcx> {
let ty = tcx.type_of(def).no_bound_vars().expect("const parameter types cannot be generic");
- match Self::try_eval_lit_or_param(tcx, ty, expr) {
+ match Self::try_from_lit_or_param(tcx, ty, expr) {
Some(v) => v,
None => ty::Const::new_unevaluated(
tcx,
@@ -179,7 +168,7 @@ impl<'tcx> Const<'tcx> {
}
#[instrument(skip(tcx), level = "debug")]
- fn try_eval_lit_or_param(
+ fn try_from_lit_or_param(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
@@ -254,14 +243,6 @@ impl<'tcx> Const<'tcx> {
}
}
- /// Panics if self.kind != ty::ConstKind::Value
- pub fn to_valtree(self) -> ty::ValTree<'tcx> {
- match self.kind() {
- ty::ConstKind::Value(valtree) => valtree,
- _ => bug!("expected ConstKind::Value, got {:?}", self.kind()),
- }
- }
-
#[inline]
/// Creates a constant with the given integer value and interns it.
pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> Self {
@@ -294,33 +275,83 @@ impl<'tcx> Const<'tcx> {
Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
}
- /// Attempts to convert to a `ValTree`
- pub fn try_to_valtree(self) -> Option<ty::ValTree<'tcx>> {
+ /// Returns the evaluated constant
+ #[inline]
+ pub fn eval(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ span: Option<Span>,
+ ) -> Result<ValTree<'tcx>, ErrorHandled> {
+ assert!(!self.has_escaping_bound_vars(), "escaping vars in {self:?}");
match self.kind() {
- ty::ConstKind::Value(valtree) => Some(valtree),
- _ => None,
+ ConstKind::Unevaluated(unevaluated) => {
+ // FIXME(eddyb) maybe the `const_eval_*` methods should take
+ // `ty::ParamEnvAnd` instead of having them separate.
+ let (param_env, unevaluated) = unevaluated.prepare_for_eval(tcx, param_env);
+ // try to resolve e.g. associated constants to their definition on an impl, and then
+ // evaluate the const.
+ let c = tcx.const_eval_resolve_for_typeck(param_env, unevaluated, span)?;
+ Ok(c.expect("`ty::Const::eval` called on a non-valtree-compatible type"))
+ }
+ ConstKind::Value(val) => Ok(val),
+ ConstKind::Error(g) => Err(g.into()),
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Expr(_) => Err(ErrorHandled::TooGeneric(span.unwrap_or(DUMMY_SP))),
}
}
+ /// Normalizes the constant to a value or an error if possible.
+ #[inline]
+ pub fn normalize(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+ match self.eval(tcx, param_env, None) {
+ Ok(val) => Self::new_value(tcx, val, self.ty()),
+ Err(ErrorHandled::Reported(r, _span)) => Self::new_error(tcx, r.into(), self.ty()),
+ Err(ErrorHandled::TooGeneric(_span)) => self,
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_scalar(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Option<Scalar> {
+ self.eval(tcx, param_env, None).ok()?.try_to_scalar()
+ }
+
#[inline]
/// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
/// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
/// contains const generic parameters or pointers).
- pub fn try_eval_bits(
+ pub fn try_eval_scalar_int(
self,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
- ty: Ty<'tcx>,
- ) -> Option<u128> {
- assert_eq!(self.ty(), ty);
- let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ ) -> Option<ScalarInt> {
+ self.try_eval_scalar(tcx, param_env)?.try_to_int().ok()
+ }
+
+ #[inline]
+ /// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
+ /// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
+ /// contains const generic parameters or pointers).
+ pub fn try_eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<u128> {
+ let int = self.try_eval_scalar_int(tcx, param_env)?;
+ let size =
+ tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size;
// if `ty` does not depend on generic parameters, use an empty param_env
- self.eval(tcx, param_env).try_to_bits(size)
+ int.to_bits(size).ok()
}
#[inline]
- pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
- self.eval(tcx, param_env).try_to_bool()
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", self.ty(), self))
}
#[inline]
@@ -329,29 +360,12 @@ impl<'tcx> Const<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
) -> Option<u64> {
- self.eval(tcx, param_env).try_to_target_usize(tcx)
- }
-
- #[inline]
- /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
- /// unevaluated constant.
- pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Const<'tcx> {
- if let Some(val) = self.try_eval_for_typeck(tcx, param_env) {
- match val {
- Ok(val) => ty::Const::new_value(tcx, val, self.ty()),
- Err(guar) => ty::Const::new_error(tcx, guar, self.ty()),
- }
- } else {
- // Either the constant isn't evaluatable or ValTree creation failed.
- self
- }
+ self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok()
}
#[inline]
- /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
- pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
- self.try_eval_bits(tcx, param_env, ty)
- .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+ pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+ self.try_eval_scalar_int(tcx, param_env)?.try_into().ok()
}
#[inline]
@@ -361,136 +375,30 @@ impl<'tcx> Const<'tcx> {
.unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
}
- #[inline]
- /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary
- /// return `None`.
- // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
- pub fn try_eval_for_mir(
- self,
- tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- ) -> Option<Result<ConstValue<'tcx>, ErrorGuaranteed>> {
- match self.try_eval_inner(tcx, param_env, EvalMode::Mir) {
- Some(Ok(EvalResult::ValTree(_))) => unreachable!(),
- Some(Ok(EvalResult::ConstVal(v))) => Some(Ok(v)),
- Some(Err(e)) => Some(Err(e)),
- None => None,
- }
- }
-
- #[inline]
- /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary
- /// return `None`.
- // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
- pub fn try_eval_for_typeck(
- self,
- tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- ) -> Option<Result<ty::ValTree<'tcx>, ErrorGuaranteed>> {
- match self.try_eval_inner(tcx, param_env, EvalMode::Typeck) {
- Some(Ok(EvalResult::ValTree(v))) => Some(Ok(v)),
- Some(Ok(EvalResult::ConstVal(_))) => unreachable!(),
- Some(Err(e)) => Some(Err(e)),
- None => None,
+ /// Panics if self.kind != ty::ConstKind::Value
+ pub fn to_valtree(self) -> ty::ValTree<'tcx> {
+ match self.kind() {
+ ty::ConstKind::Value(valtree) => valtree,
+ _ => bug!("expected ConstKind::Value, got {:?}", self.kind()),
}
}
- #[inline]
- fn try_eval_inner(
- self,
- tcx: TyCtxt<'tcx>,
- param_env: ParamEnv<'tcx>,
- eval_mode: EvalMode,
- ) -> Option<Result<EvalResult<'tcx>, ErrorGuaranteed>> {
- assert!(!self.has_escaping_bound_vars(), "escaping vars in {self:?}");
- if let ConstKind::Unevaluated(unevaluated) = self.kind() {
- use crate::mir::interpret::ErrorHandled;
-
- // HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
- // also does later, but we want to do it before checking for
- // inference variables.
- // Note that we erase regions *before* calling `with_reveal_all_normalized`,
- // so that we don't try to invoke this query with
- // any region variables.
-
- // HACK(eddyb) when the query key would contain inference variables,
- // attempt using identity args and `ParamEnv` instead, that will succeed
- // when the expression doesn't depend on any parameters.
- // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
- // we can call `infcx.const_eval_resolve` which handles inference variables.
- let param_env_and = if (param_env, unevaluated).has_non_region_infer() {
- tcx.param_env(unevaluated.def).and(ty::UnevaluatedConst {
- def: unevaluated.def,
- args: GenericArgs::identity_for_item(tcx, unevaluated.def),
- })
- } else {
- tcx.erase_regions(param_env)
- .with_reveal_all_normalized(tcx)
- .and(tcx.erase_regions(unevaluated))
- };
-
- // FIXME(eddyb) maybe the `const_eval_*` methods should take
- // `ty::ParamEnvAnd` instead of having them separate.
- let (param_env, unevaluated) = param_env_and.into_parts();
- // try to resolve e.g. associated constants to their definition on an impl, and then
- // evaluate the const.
- match eval_mode {
- EvalMode::Typeck => {
- match tcx.const_eval_resolve_for_typeck(param_env, unevaluated, None) {
- // NOTE(eddyb) `val` contains no lifetimes/types/consts,
- // and we use the original type, so nothing from `args`
- // (which may be identity args, see above),
- // can leak through `val` into the const we return.
- Ok(val) => Some(Ok(EvalResult::ValTree(val?))),
- Err(ErrorHandled::TooGeneric) => None,
- Err(ErrorHandled::Reported(e)) => Some(Err(e.into())),
- }
- }
- EvalMode::Mir => {
- match tcx.const_eval_resolve(param_env, unevaluated.expand(), None) {
- // NOTE(eddyb) `val` contains no lifetimes/types/consts,
- // and we use the original type, so nothing from `args`
- // (which may be identity args, see above),
- // can leak through `val` into the const we return.
- Ok(val) => Some(Ok(EvalResult::ConstVal(val))),
- Err(ErrorHandled::TooGeneric) => None,
- Err(ErrorHandled::Reported(e)) => Some(Err(e.into())),
- }
- }
- }
- } else {
- None
+ /// Attempts to convert to a `ValTree`
+ pub fn try_to_valtree(self) -> Option<ty::ValTree<'tcx>> {
+ match self.kind() {
+ ty::ConstKind::Value(valtree) => Some(valtree),
+ _ => None,
}
}
#[inline]
- pub fn try_to_value(self) -> Option<ty::ValTree<'tcx>> {
- if let ConstKind::Value(val) = self.kind() { Some(val) } else { None }
- }
-
- #[inline]
pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
- self.try_to_value()?.try_to_scalar()
- }
-
- #[inline]
- pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
- self.try_to_value()?.try_to_scalar_int()
- }
-
- #[inline]
- pub fn try_to_bits(self, size: Size) -> Option<u128> {
- self.try_to_scalar_int()?.to_bits(size).ok()
- }
-
- #[inline]
- pub fn try_to_bool(self) -> Option<bool> {
- self.try_to_scalar_int()?.try_into().ok()
+ self.try_to_valtree()?.try_to_scalar()
}
#[inline]
pub fn try_to_target_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
- self.try_to_value()?.try_to_target_usize(tcx)
+ self.try_to_valtree()?.try_to_target_usize(tcx)
}
pub fn is_ct_infer(self) -> bool {
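Note on the `consts.rs` hunks above: the old `eval`-returning-`Const` plus the `try_eval_for_{mir,typeck}` pair are replaced by a single valtree-based `eval`, a `normalize` helper, and narrower `try_eval_*` accessors. The following is a minimal sketch of the new call shape, assuming a `TyCtxt` and `ParamEnv` are supplied by the caller; `describe_const` is a hypothetical helper name, everything else comes from the hunks above.

use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::{self, TyCtxt};

// Hypothetical helper: report what the reworked `ty::Const::eval` returns.
fn describe_const<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ct: ty::Const<'tcx>,
) -> String {
    // `eval` now yields a `ValTree` directly instead of another `Const`.
    match ct.eval(tcx, param_env, None) {
        Ok(valtree) => format!("evaluated to {valtree:?}"),
        Err(ErrorHandled::TooGeneric(_span)) => "still depends on generic parameters".into(),
        Err(ErrorHandled::Reported(_guar, _span)) => "evaluation already failed".into(),
    }
}

As the hunk for `try_eval_bits` shows, that helper now reads the type from the constant itself, so callers pass only `tcx` and `param_env` rather than an explicit `Ty`.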
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index b16163edf..9d99344d5 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -227,6 +227,11 @@ impl ScalarInt {
}
#[inline]
+ pub fn try_from_target_usize(i: impl Into<u128>, tcx: TyCtxt<'_>) -> Option<Self> {
+ Self::try_from_uint(i, tcx.data_layout.pointer_size)
+ }
+
+ #[inline]
pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).unwrap_or_else(|size| {
bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
index db4a15fbe..749b54ca0 100644
--- a/compiler/rustc_middle/src/ty/consts/kind.rs
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -2,13 +2,13 @@ use super::Const;
use crate::mir;
use crate::ty::abstract_const::CastKind;
use crate::ty::GenericArgsRef;
-use crate::ty::{self, List, Ty};
+use crate::ty::{self, visit::TypeVisitableExt as _, List, Ty, TyCtxt};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
/// An unevaluated (potentially generic) constant used in the type-system.
-#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Copy, Clone, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
pub struct UnevaluatedConst<'tcx> {
pub def: DefId,
@@ -22,9 +22,37 @@ impl rustc_errors::IntoDiagnosticArg for UnevaluatedConst<'_> {
}
impl<'tcx> UnevaluatedConst<'tcx> {
+ /// FIXME(RalfJung): I cannot explain what this does or why it makes sense, but not doing this
+ /// hurts performance.
#[inline]
- pub fn expand(self) -> mir::UnevaluatedConst<'tcx> {
- mir::UnevaluatedConst { def: self.def, args: self.args, promoted: None }
+ pub(crate) fn prepare_for_eval(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> (ty::ParamEnv<'tcx>, Self) {
+ // HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
+ // also does later, but we want to do it before checking for
+ // inference variables.
+ // Note that we erase regions *before* calling `with_reveal_all_normalized`,
+ // so that we don't try to invoke this query with
+ // any region variables.
+
+ // HACK(eddyb) when the query key would contain inference variables,
+ // attempt using identity args and `ParamEnv` instead, that will succeed
+ // when the expression doesn't depend on any parameters.
+ // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
+ // we can call `infcx.const_eval_resolve` which handles inference variables.
+ if (param_env, self).has_non_region_infer() {
+ (
+ tcx.param_env(self.def),
+ ty::UnevaluatedConst {
+ def: self.def,
+ args: ty::GenericArgs::identity_for_item(tcx, self.def),
+ },
+ )
+ } else {
+ (tcx.erase_regions(param_env).with_reveal_all_normalized(tcx), tcx.erase_regions(self))
+ }
}
}
@@ -55,6 +83,11 @@ static_assert_size!(super::ConstKind<'_>, 32);
pub enum InferConst<'tcx> {
/// Infer the value of the const.
Var(ty::ConstVid<'tcx>),
+ /// Infer the value of the effect.
+ ///
+ /// For why this is separate from the `Var` variant above, see the
+ /// documentation on `EffectVid`.
+ EffectVar(ty::EffectVid<'tcx>),
/// A fresh const variable. See `infer::freshen` for more details.
Fresh(u32),
}
@@ -62,7 +95,9 @@ pub enum InferConst<'tcx> {
impl<CTX> HashStable<CTX> for InferConst<'_> {
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
match self {
- InferConst::Var(_) => panic!("const variables should not be hashed: {self:?}"),
+ InferConst::Var(_) | InferConst::EffectVar(_) => {
+ panic!("const variables should not be hashed: {self:?}")
+ }
InferConst::Fresh(i) => i.hash_stable(hcx, hasher),
}
}
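The `InferConst` extension above adds an `EffectVar` variant alongside `Var`, so exhaustive matches downstream need one more arm. A small sketch, using only the enum shown in the hunk (`describe_infer_const` is a hypothetical name):

use rustc_middle::ty::InferConst;

fn describe_infer_const(ic: InferConst<'_>) -> &'static str {
    match ic {
        InferConst::Var(_) => "const inference variable",
        // New in this version: effect inference variables, kept separate from
        // ordinary const variables (see the `EffectVid` docs referenced above).
        InferConst::EffectVar(_) => "effect inference variable",
        InferConst::Fresh(_) => "fresh const variable",
    }
}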
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index be839e03c..c06b8b2df 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -39,7 +39,7 @@ use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
-use rustc_data_structures::sync::{self, Lock, Lrc, MappedReadGuard, ReadGuard, WorkerLocal};
+use rustc_data_structures::sync::{self, FreezeReadGuard, Lock, Lrc, WorkerLocal};
use rustc_data_structures::unord::UnordSet;
use rustc_errors::{
DecorateLint, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, MultiSpan,
@@ -50,7 +50,7 @@ use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
-use rustc_hir::{Constness, HirId, Node, TraitCandidate};
+use rustc_hir::{HirId, Node, TraitCandidate};
use rustc_index::IndexVec;
use rustc_macros::HashStable;
use rustc_query_system::dep_graph::DepNodeIndex;
@@ -82,6 +82,7 @@ use std::ops::{Bound, Deref};
impl<'tcx> Interner for TyCtxt<'tcx> {
type AdtDef = ty::AdtDef<'tcx>;
type GenericArgsRef = ty::GenericArgsRef<'tcx>;
+ type GenericArg = ty::GenericArg<'tcx>;
type DefId = DefId;
type Binder<T> = Binder<'tcx, T>;
type Ty = Ty<'tcx>;
@@ -317,7 +318,7 @@ pub struct CommonLifetimes<'tcx> {
pub re_vars: Vec<Region<'tcx>>,
/// Pre-interned values of the form:
- /// `ReLateBound(DebruijnIndex(i), BoundRegion { var: v, kind: BrAnon(None) })`
+ /// `ReLateBound(DebruijnIndex(i), BoundRegion { var: v, kind: BrAnon })`
/// for small values of `i` and `v`.
pub re_late_bounds: Vec<Vec<Region<'tcx>>>,
}
@@ -394,7 +395,7 @@ impl<'tcx> CommonLifetimes<'tcx> {
.map(|v| {
mk(ty::ReLateBound(
ty::DebruijnIndex::from(i),
- ty::BoundRegion { var: ty::BoundVar::from(v), kind: ty::BrAnon(None) },
+ ty::BoundRegion { var: ty::BoundVar::from(v), kind: ty::BrAnon },
))
})
.collect()
@@ -553,6 +554,10 @@ pub struct GlobalCtxt<'tcx> {
/// Common consts, pre-interned for your convenience.
pub consts: CommonConsts<'tcx>,
+ /// Hooks to be able to register functions in other crates that can then still
+ /// be called from rustc_middle.
+ pub(crate) hooks: crate::hooks::Providers,
+
untracked: Untracked,
pub query_system: QuerySystem<'tcx>,
@@ -647,7 +652,7 @@ impl<'tcx> TyCtxt<'tcx> {
// Create an allocation that just contains these bytes.
let alloc = interpret::Allocation::from_bytes_byte_aligned_immutable(bytes);
let alloc = self.mk_const_alloc(alloc);
- self.create_memory_alloc(alloc)
+ self.reserve_and_set_memory_alloc(alloc)
}
/// Returns a range of the start/end indices specified with the
@@ -702,6 +707,7 @@ impl<'tcx> TyCtxt<'tcx> {
dep_graph: DepGraph,
query_kinds: &'tcx [DepKindStruct<'tcx>],
query_system: QuerySystem<'tcx>,
+ hooks: crate::hooks::Providers,
) -> GlobalCtxt<'tcx> {
let data_layout = s.target.parse_data_layout().unwrap_or_else(|err| {
s.emit_fatal(err);
@@ -720,6 +726,7 @@ impl<'tcx> TyCtxt<'tcx> {
hir_arena,
interners,
dep_graph,
+ hooks,
prof: s.prof.clone(),
types: common_types,
lifetimes: common_lifetimes,
@@ -964,8 +971,8 @@ impl<'tcx> TyCtxt<'tcx> {
i += 1;
}
- // Leak a read lock once we finish iterating on definitions, to prevent adding new ones.
- definitions.leak();
+ // Freeze definitions once we finish iterating on them, to prevent adding new ones.
+ definitions.freeze();
})
}
@@ -974,10 +981,9 @@ impl<'tcx> TyCtxt<'tcx> {
// definitions change.
self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);
- // Leak a read lock once we start iterating on definitions, to prevent adding new ones
+ // Freeze definitions once we start iterating on them, to prevent adding new ones
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
- let definitions = self.untracked.definitions.leak();
- definitions.def_path_table()
+ self.untracked.definitions.freeze().def_path_table()
}
pub fn def_path_hash_to_def_index_map(
@@ -986,17 +992,16 @@ impl<'tcx> TyCtxt<'tcx> {
// Create a dependency to the crate to be sure we re-execute this when the amount of
// definitions change.
self.ensure().hir_crate(());
- // Leak a read lock once we start iterating on definitions, to prevent adding new ones
+ // Freeze definitions once we start iterating on them, to prevent adding new ones
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
- let definitions = self.untracked.definitions.leak();
- definitions.def_path_hash_to_def_index_map()
+ self.untracked.definitions.freeze().def_path_hash_to_def_index_map()
}
/// Note that this is *untracked* and should only be used within the query
/// system if the result is otherwise tracked through queries
#[inline]
- pub fn cstore_untracked(self) -> MappedReadGuard<'tcx, CrateStoreDyn> {
- ReadGuard::map(self.untracked.cstore.read(), |c| &**c)
+ pub fn cstore_untracked(self) -> FreezeReadGuard<'tcx, CrateStoreDyn> {
+ FreezeReadGuard::map(self.untracked.cstore.read(), |c| &**c)
}
/// Give out access to the untracked data without any sanity checks.
@@ -1006,7 +1011,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Note that this is *untracked* and should only be used within the query
/// system if the result is otherwise tracked through queries
#[inline]
- pub fn definitions_untracked(self) -> ReadGuard<'tcx, Definitions> {
+ pub fn definitions_untracked(self) -> FreezeReadGuard<'tcx, Definitions> {
self.untracked.definitions.read()
}
@@ -1109,7 +1114,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id)
&& let hir::TyKind::Path(hir::QPath::Resolved(
None,
- hir::Path { res: hir::def::Res::Def(DefKind::TyAlias { .. }, def_id), .. }, )) = hir_output.kind
+ hir::Path { res: hir::def::Res::Def(DefKind::TyAlias, def_id), .. }, )) = hir_output.kind
&& let Some(local_id) = def_id.as_local()
&& let Some(alias_ty) = self.hir().get_by_def_id(local_id).alias_ty() // it is type alias
&& let Some(alias_generics) = self.hir().get_by_def_id(local_id).generics()
@@ -1216,6 +1221,25 @@ macro_rules! nop_lift {
impl<'a, 'tcx> Lift<'tcx> for $ty {
type Lifted = $lifted;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ // Assert that the set has the right type.
+ // Given an argument that has an interned type, the return type has the type of
+ // the corresponding interner set. This won't actually return anything, we're
+ // just doing this to compute said type!
+ fn _intern_set_ty_from_interned_ty<'tcx, Inner>(
+ _x: Interned<'tcx, Inner>,
+ ) -> InternedSet<'tcx, Inner> {
+ unreachable!()
+ }
+ fn _type_eq<T>(_x: &T, _y: &T) {}
+ fn _test<'tcx>(x: $lifted, tcx: TyCtxt<'tcx>) {
+ // If `x` is a newtype around an `Interned<T>`, then `interner` is an
+ // interner of appropriate type. (Ideally we'd also check that `x` is a
+ // newtype with just that one field. Not sure how to do that.)
+ let interner = _intern_set_ty_from_interned_ty(x.0);
+ // Now check that this is the same type as `interners.$set`.
+ _type_eq(&interner, &tcx.interners.$set);
+ }
+
tcx.interners
.$set
.contains_pointer_to(&InternedInSet(&*self.0.0))
@@ -1232,6 +1256,11 @@ macro_rules! nop_list_lift {
impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
type Lifted = &'tcx List<$lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ // Assert that the set has the right type.
+ if false {
+ let _x: &InternedSet<'tcx, List<$lifted>> = &tcx.interners.$set;
+ }
+
if self.is_empty() {
return Some(List::empty());
}
@@ -1253,19 +1282,13 @@ nop_lift! {predicate; Clause<'a> => Clause<'tcx>}
nop_list_lift! {type_lists; Ty<'a> => Ty<'tcx>}
nop_list_lift! {poly_existential_predicates; PolyExistentialPredicate<'a> => PolyExistentialPredicate<'tcx>}
-nop_list_lift! {clauses; Clause<'a> => Clause<'tcx>}
-nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>}
-nop_list_lift! {projs; ProjectionKind => ProjectionKind}
nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariableKind}
// This is the impl for `&'a GenericArgs<'a>`.
nop_list_lift! {args; GenericArg<'a> => GenericArg<'tcx>}
-CloneLiftImpls! {
- Constness,
- traits::WellFormedLoc,
+TrivialLiftImpls! {
ImplPolarity,
- crate::mir::ReturnConstraint,
}
macro_rules! sty_debug_print {
@@ -1296,25 +1319,26 @@ macro_rules! sty_debug_print {
};
$(let mut $variant = total;)*
- let shards = tcx.interners.type_.lock_shards();
- let types = shards.iter().flat_map(|shard| shard.keys());
- for &InternedInSet(t) in types {
- let variant = match t.internee {
- ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
- ty::Float(..) | ty::Str | ty::Never => continue,
- ty::Error(_) => /* unimportant */ continue,
- $(ty::$variant(..) => &mut $variant,)*
- };
- let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
- let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
- let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
-
- variant.total += 1;
- total.total += 1;
- if lt { total.lt_infer += 1; variant.lt_infer += 1 }
- if ty { total.ty_infer += 1; variant.ty_infer += 1 }
- if ct { total.ct_infer += 1; variant.ct_infer += 1 }
- if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+ for shard in tcx.interners.type_.lock_shards() {
+ let types = shard.keys();
+ for &InternedInSet(t) in types {
+ let variant = match t.internee {
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(..) | ty::Str | ty::Never => continue,
+ ty::Error(_) => /* unimportant */ continue,
+ $(ty::$variant(..) => &mut $variant,)*
+ };
+ let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
+ let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
+ let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
+
+ variant.total += 1;
+ total.total += 1;
+ if lt { total.lt_infer += 1; variant.lt_infer += 1 }
+ if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+ if ct { total.ct_infer += 1; variant.ct_infer += 1 }
+ if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+ }
}
writeln!(fmt, "Ty interner total ty lt ct all")?;
$(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \
@@ -1360,7 +1384,6 @@ impl<'tcx> TyCtxt<'tcx> {
Placeholder,
Generator,
GeneratorWitness,
- GeneratorWitnessMIR,
Dynamic,
Closure,
Tuple,
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index 5db9b775a..f03813a45 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -70,10 +70,10 @@ impl<'tcx> Ty<'tcx> {
/// description in error messages. This is used in the primary span label. Beyond what
/// `is_simple_ty` includes, it also accepts ADTs with no type arguments and references to
/// ADTs with no type arguments.
- pub fn is_simple_text(self) -> bool {
+ pub fn is_simple_text(self, tcx: TyCtxt<'tcx>) -> bool {
match self.kind() {
- Adt(_, args) => args.non_erasable_generics().next().is_none(),
- Ref(_, ty, _) => ty.is_simple_text(),
+ Adt(def, args) => args.non_erasable_generics(tcx, def.did()).next().is_none(),
+ Ref(_, ty, _) => ty.is_simple_text(tcx),
_ => self.is_simple_ty(),
}
}
@@ -493,7 +493,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for IsSuggestableVisitor<'tcx> {
Alias(Opaque, AliasTy { def_id, .. }) => {
let parent = self.tcx.parent(def_id);
let parent_ty = self.tcx.type_of(parent).instantiate_identity();
- if let DefKind::TyAlias { .. } | DefKind::AssocTy = self.tcx.def_kind(parent)
+ if let DefKind::TyAlias | DefKind::AssocTy = self.tcx.def_kind(parent)
&& let Alias(Opaque, AliasTy { def_id: parent_opaque_def_id, .. }) = *parent_ty.kind()
&& parent_opaque_def_id == def_id
{
@@ -577,7 +577,7 @@ impl<'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for MakeSuggestableFolder<'tcx> {
Alias(Opaque, AliasTy { def_id, .. }) => {
let parent = self.tcx.parent(def_id);
let parent_ty = self.tcx.type_of(parent).instantiate_identity();
- if let hir::def::DefKind::TyAlias { .. } | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
+ if let hir::def::DefKind::TyAlias | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
&& let Alias(Opaque, AliasTy { def_id: parent_opaque_def_id, .. }) = *parent_ty.kind()
&& parent_opaque_def_id == def_id
{
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
index 7895993cc..3371ea3be 100644
--- a/compiler/rustc_middle/src/ty/erase_regions.rs
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -20,8 +20,8 @@ impl<'tcx> TyCtxt<'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>>,
{
- // If there's nothing to erase avoid performing the query at all
- if !value.has_type_flags(TypeFlags::HAS_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) {
+ // If there's nothing to erase or anonymize, avoid performing the query at all
+ if !value.has_type_flags(TypeFlags::HAS_BINDER_VARS | TypeFlags::HAS_FREE_REGIONS) {
return value;
}
debug!("erase_regions({:?})", value);
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index bf6f082c2..459c8dfb5 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -11,7 +11,7 @@ use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::path::PathBuf;
-#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
pub struct ExpectedFound<T> {
pub expected: T,
pub found: T,
@@ -28,7 +28,7 @@ impl<T> ExpectedFound<T> {
}
// Data structures used in type unification
-#[derive(Copy, Clone, Debug, TypeVisitable, Lift, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, TypeVisitable, PartialEq, Eq)]
#[rustc_pass_by_value]
pub enum TypeError<'tcx> {
Mismatch,
@@ -242,8 +242,7 @@ impl<'tcx> Ty<'tcx> {
ty::Dynamic(..) => "trait object".into(),
ty::Closure(..) => "closure".into(),
ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
- ty::GeneratorWitness(..) |
- ty::GeneratorWitnessMIR(..) => "generator witness".into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
ty::Infer(ty::TyVar(_)) => "inferred type".into(),
ty::Infer(ty::IntVar(_)) => "integer".into(),
ty::Infer(ty::FloatVar(_)) => "floating-point number".into(),
@@ -295,7 +294,7 @@ impl<'tcx> Ty<'tcx> {
ty::Dynamic(..) => "trait object".into(),
ty::Closure(..) => "closure".into(),
ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
- ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => "generator witness".into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
ty::Tuple(..) => "tuple".into(),
ty::Placeholder(..) => "higher-ranked type".into(),
ty::Bound(..) => "bound type variable".into(),
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
index 668aa4521..16935d5b3 100644
--- a/compiler/rustc_middle/src/ty/fast_reject.rs
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -29,8 +29,7 @@ pub enum SimplifiedType {
Trait(DefId),
Closure(DefId),
Generator(DefId),
- GeneratorWitness(usize),
- GeneratorWitnessMIR(DefId),
+ GeneratorWitness(DefId),
Function(usize),
Placeholder,
}
@@ -130,10 +129,7 @@ pub fn simplify_type<'tcx>(
ty::Ref(_, _, mutbl) => Some(SimplifiedType::Ref(mutbl)),
ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(SimplifiedType::Closure(def_id)),
ty::Generator(def_id, _, _) => Some(SimplifiedType::Generator(def_id)),
- ty::GeneratorWitness(tys) => {
- Some(SimplifiedType::GeneratorWitness(tys.skip_binder().len()))
- }
- ty::GeneratorWitnessMIR(def_id, _) => Some(SimplifiedType::GeneratorWitnessMIR(def_id)),
+ ty::GeneratorWitness(def_id, _) => Some(SimplifiedType::GeneratorWitness(def_id)),
ty::Never => Some(SimplifiedType::Never),
ty::Tuple(tys) => Some(SimplifiedType::Tuple(tys.len())),
ty::FnPtr(f) => Some(SimplifiedType::Function(f.skip_binder().inputs().len())),
@@ -169,7 +165,7 @@ impl SimplifiedType {
| SimplifiedType::Trait(d)
| SimplifiedType::Closure(d)
| SimplifiedType::Generator(d)
- | SimplifiedType::GeneratorWitnessMIR(d) => Some(d),
+ | SimplifiedType::GeneratorWitness(d) => Some(d),
_ => None,
}
}
@@ -240,7 +236,6 @@ impl DeepRejectCtxt {
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(_) => bug!("unexpected impl_ty: {impl_ty}"),
@@ -342,7 +337,7 @@ impl DeepRejectCtxt {
ty::Error(_) => true,
- ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => {
+ ty::GeneratorWitness(..) => {
bug!("unexpected obligation type: {:?}", obligation_ty)
}
}
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index bbd4a6233..c23d553d9 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -59,18 +59,8 @@ impl FlagComputation {
{
let mut computation = FlagComputation::new();
- for bv in value.bound_vars() {
- match bv {
- ty::BoundVariableKind::Ty(_) => {
- computation.flags |= TypeFlags::HAS_TY_LATE_BOUND;
- }
- ty::BoundVariableKind::Region(_) => {
- computation.flags |= TypeFlags::HAS_RE_LATE_BOUND;
- }
- ty::BoundVariableKind::Const => {
- computation.flags |= TypeFlags::HAS_CT_LATE_BOUND;
- }
- }
+ if !value.bound_vars().is_empty() {
+ computation.add_flags(TypeFlags::HAS_BINDER_VARS);
}
f(&mut computation, value.skip_binder());
@@ -121,11 +111,7 @@ impl FlagComputation {
self.add_ty(args.tupled_upvars_ty());
}
- &ty::GeneratorWitness(ts) => {
- self.bound_computation(ts, |flags, ts| flags.add_tys(ts));
- }
-
- ty::GeneratorWitnessMIR(_, args) => {
+ ty::GeneratorWitness(_, args) => {
let should_remove_further_specializable =
!self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
self.add_args(args);
@@ -324,7 +310,9 @@ impl FlagComputation {
self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
match infer {
InferConst::Fresh(_) => self.add_flags(TypeFlags::HAS_CT_FRESH),
- InferConst::Var(_) => self.add_flags(TypeFlags::HAS_CT_INFER),
+ InferConst::Var(_) | InferConst::EffectVar(_) => {
+ self.add_flags(TypeFlags::HAS_CT_INFER)
+ }
}
}
ty::ConstKind::Bound(debruijn, _) => {
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
index 77cf6bee7..00529a1e0 100644
--- a/compiler/rustc_middle/src/ty/fold.rs
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -385,7 +385,7 @@ impl<'tcx> TyCtxt<'tcx> {
let index = entry.index();
let var = ty::BoundVar::from_usize(index);
let kind = entry
- .or_insert_with(|| ty::BoundVariableKind::Region(ty::BrAnon(None)))
+ .or_insert_with(|| ty::BoundVariableKind::Region(ty::BrAnon))
.expect_region();
let br = ty::BoundRegion { var, kind };
ty::Region::new_late_bound(self.tcx, ty::INNERMOST, br)
diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index 97dab5cb4..72390e4bb 100644
--- a/compiler/rustc_middle/src/ty/generic_args.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -379,12 +379,17 @@ impl<'tcx> GenericArgs<'tcx> {
self.iter().filter_map(|k| k.as_const())
}
+ /// Returns generic arguments that are not lifetimes or host effect params.
#[inline]
pub fn non_erasable_generics(
&'tcx self,
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
) -> impl DoubleEndedIterator<Item = GenericArgKind<'tcx>> + 'tcx {
- self.iter().filter_map(|k| match k.unpack() {
- GenericArgKind::Lifetime(_) => None,
+ let generics = tcx.generics_of(def_id);
+ self.iter().enumerate().filter_map(|(i, k)| match k.unpack() {
+ _ if Some(i) == generics.host_effect_index => None,
+ ty::GenericArgKind::Lifetime(_) => None,
generic => Some(generic),
})
}
@@ -440,7 +445,7 @@ impl<'tcx> GenericArgs<'tcx> {
target_args: GenericArgsRef<'tcx>,
) -> GenericArgsRef<'tcx> {
let defs = tcx.generics_of(source_ancestor);
- tcx.mk_args_from_iter(target_args.iter().chain(self.iter().skip(defs.params.len())))
+ tcx.mk_args_from_iter(target_args.iter().chain(self.iter().skip(defs.count())))
}
pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> GenericArgsRef<'tcx> {
@@ -450,6 +455,11 @@ impl<'tcx> GenericArgs<'tcx> {
pub fn host_effect_param(&'tcx self) -> Option<ty::Const<'tcx>> {
self.consts().rfind(|x| matches!(x.kind(), ty::ConstKind::Param(p) if p.name == sym::host))
}
+
+ pub fn print_as_list(&self) -> String {
+ let v = self.iter().map(|arg| arg.to_string()).collect::<Vec<_>>();
+ format!("[{}]", v.join(", "))
+ }
}
impl<'tcx> TypeFoldable<TyCtxt<'tcx>> for GenericArgsRef<'tcx> {
@@ -1019,7 +1029,7 @@ impl<'a, 'tcx> ArgFolder<'a, 'tcx> {
/// Stores the user-given args to reach some fully qualified path
/// (e.g., `<T>::Item` or `<T as Trait>::Item`).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct UserArgs<'tcx> {
/// The args for the item as given by the user.
pub args: GenericArgsRef<'tcx>,
@@ -1046,7 +1056,7 @@ pub struct UserArgs<'tcx> {
/// the self type, giving `Foo<?A>`. Finally, we unify that with
/// the self type here, which contains `?A` to be `&'static u32`
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
-#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
pub struct UserSelfTy<'tcx> {
pub impl_def_id: DefId,
pub self_ty: Ty<'tcx>,
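Per the `generic_args.rs` hunk above, `non_erasable_generics` now takes the `TyCtxt` and the owning `DefId` so that host-effect const params are filtered out along with lifetimes. A sketch of the updated call shape, mirroring the call sites changed later in this patch; `has_non_erasable_generics` is a hypothetical helper name.

use rustc_hir::def_id::DefId;
use rustc_middle::ty::{GenericArgsRef, TyCtxt};

// Hypothetical helper: does this instantiation carry any generics that survive
// erasure, i.e. anything besides lifetimes and the host effect parameter?
fn has_non_erasable_generics<'tcx>(
    tcx: TyCtxt<'tcx>,
    def_id: DefId,
    args: GenericArgsRef<'tcx>,
) -> bool {
    args.non_erasable_generics(tcx, def_id).next().is_some()
}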
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
index 70a35f137..8e6c1cd4b 100644
--- a/compiler/rustc_middle/src/ty/generics.rs
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -12,7 +12,7 @@ use super::{Clause, EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamT
pub enum GenericParamDefKind {
Lifetime,
Type { has_default: bool, synthetic: bool },
- Const { has_default: bool },
+ Const { has_default: bool, is_host_effect: bool },
}
impl GenericParamDefKind {
@@ -87,7 +87,7 @@ impl GenericParamDef {
GenericParamDefKind::Type { has_default, .. } if has_default => {
Some(tcx.type_of(self.def_id).map_bound(|t| t.into()))
}
- GenericParamDefKind::Const { has_default } if has_default => {
+ GenericParamDefKind::Const { has_default, .. } if has_default => {
Some(tcx.const_param_default(self.def_id).map_bound(|c| c.into()))
}
_ => None,
@@ -187,7 +187,7 @@ impl<'tcx> Generics {
GenericParamDefKind::Type { has_default, .. } => {
own_defaults.types += has_default as usize;
}
- GenericParamDefKind::Const { has_default } => {
+ GenericParamDefKind::Const { has_default, .. } => {
own_defaults.consts += has_default as usize;
}
}
@@ -212,10 +212,12 @@ impl<'tcx> Generics {
pub fn own_requires_monomorphization(&self) -> bool {
for param in &self.params {
match param.kind {
- GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ GenericParamDefKind::Type { .. }
+ | GenericParamDefKind::Const { is_host_effect: false, .. } => {
return true;
}
- GenericParamDefKind::Lifetime => {}
+ GenericParamDefKind::Lifetime
+ | GenericParamDefKind::Const { is_host_effect: true, .. } => {}
}
}
false
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index 8913bf76d..0b0a708e4 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -18,6 +18,9 @@ use std::fmt;
/// Monomorphization happens on-the-fly and no monomorphized MIR is ever created. Instead, this type
/// simply couples a potentially generic `InstanceDef` with some args, and codegen and const eval
/// will do all required substitution as they run.
+///
+/// Note: the `Lift` impl is currently not used by rustc, but is used by
+/// rustc_codegen_cranelift when the `jit` feature is enabled.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)]
pub struct Instance<'tcx> {
@@ -115,7 +118,7 @@ impl<'tcx> Instance<'tcx> {
/// lifetimes erased, allowing a `ParamEnv` to be specified for use during normalization.
pub fn ty(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Ty<'tcx> {
let ty = tcx.type_of(self.def.def_id());
- tcx.subst_and_normalize_erasing_regions(self.args, param_env, ty)
+ tcx.instantiate_and_normalize_erasing_regions(self.args, param_env, ty)
}
/// Finds a crate that contains a monomorphization of this instance that
@@ -139,7 +142,7 @@ impl<'tcx> Instance<'tcx> {
}
// If this a non-generic instance, it cannot be a shared monomorphization.
- self.args.non_erasable_generics().next()?;
+ self.args.non_erasable_generics(tcx, self.def_id()).next()?;
match self.def {
InstanceDef::Item(def) => tcx
@@ -344,6 +347,7 @@ impl<'tcx> Instance<'tcx> {
pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
let args = GenericArgs::for_item(tcx, def_id, |param, _| match param.kind {
ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ ty::GenericParamDefKind::Const { is_host_effect: true, .. } => tcx.consts.true_.into(),
ty::GenericParamDefKind::Type { .. } => {
bug!("Instance::mono: {:?} has type parameters", def_id)
}
@@ -576,7 +580,7 @@ impl<'tcx> Instance<'tcx> {
self.def.has_polymorphic_mir_body().then_some(self.args)
}
- pub fn subst_mir<T>(&self, tcx: TyCtxt<'tcx>, v: EarlyBinder<&T>) -> T
+ pub fn instantiate_mir<T>(&self, tcx: TyCtxt<'tcx>, v: EarlyBinder<&T>) -> T
where
T: TypeFoldable<TyCtxt<'tcx>> + Copy,
{
@@ -589,7 +593,7 @@ impl<'tcx> Instance<'tcx> {
}
#[inline(always)]
- pub fn subst_mir_and_normalize_erasing_regions<T>(
+ pub fn instantiate_mir_and_normalize_erasing_regions<T>(
&self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -599,14 +603,14 @@ impl<'tcx> Instance<'tcx> {
T: TypeFoldable<TyCtxt<'tcx>> + Clone,
{
if let Some(args) = self.args_for_mir_body() {
- tcx.subst_and_normalize_erasing_regions(args, param_env, v)
+ tcx.instantiate_and_normalize_erasing_regions(args, param_env, v)
} else {
tcx.normalize_erasing_regions(param_env, v.skip_binder())
}
}
#[inline(always)]
- pub fn try_subst_mir_and_normalize_erasing_regions<T>(
+ pub fn try_instantiate_mir_and_normalize_erasing_regions<T>(
&self,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -616,7 +620,7 @@ impl<'tcx> Instance<'tcx> {
T: TypeFoldable<TyCtxt<'tcx>> + Clone,
{
if let Some(args) = self.args_for_mir_body() {
- tcx.try_subst_and_normalize_erasing_regions(args, param_env, v)
+ tcx.try_instantiate_and_normalize_erasing_regions(args, param_env, v)
} else {
tcx.try_normalize_erasing_regions(param_env, v.skip_binder())
}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index e362b3477..bccf5e839 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -4,7 +4,9 @@ use crate::query::TyCtxtAt;
use crate::ty::normalize_erasing_regions::NormalizationError;
use crate::ty::{self, ConstKind, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
use rustc_error_messages::DiagnosticMessage;
-use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
+use rustc_errors::{
+ DiagnosticArgValue, DiagnosticBuilder, Handler, IntoDiagnostic, IntoDiagnosticArg,
+};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_index::IndexVec;
@@ -265,6 +267,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
}
}
+impl<'tcx> IntoDiagnosticArg for LayoutError<'tcx> {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+}
+
#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
pub tcx: C,
@@ -802,7 +810,6 @@ where
| ty::Never
| ty::FnDef(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Foreign(..)
| ty::Dynamic(_, _, ty::Dyn) => {
bug!("TyAndLayout::field({:?}): not applicable", this)
@@ -1110,6 +1117,10 @@ where
fn is_unit(this: TyAndLayout<'tcx>) -> bool {
matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
}
+
+ fn is_transparent(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Adt(def, _) if def.repr().transparent())
+ }
}
/// Calculates whether a function's ABI can unwind or not.
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 1274f427e..aa1e7f216 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -98,17 +98,16 @@ pub use self::sty::BoundRegionKind::*;
pub use self::sty::{
AliasTy, Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
BoundVariableKind, CanonicalPolyFnSig, ClosureArgs, ClosureArgsParts, ConstKind, ConstVid,
- EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef, FnSig,
- FreeRegion, GenSig, GeneratorArgs, GeneratorArgsParts, InlineConstArgs, InlineConstArgsParts,
- ParamConst, ParamTy, PolyExistentialPredicate, PolyExistentialProjection,
+ EarlyBoundRegion, EffectVid, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef,
+ FnSig, FreeRegion, GenSig, GeneratorArgs, GeneratorArgsParts, InlineConstArgs,
+ InlineConstArgsParts, ParamConst, ParamTy, PolyExistentialPredicate, PolyExistentialProjection,
PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, Region, RegionKind, RegionVid,
TraitRef, TyKind, TypeAndMut, UpvarArgs, VarianceDiagInfo,
};
pub use self::trait_def::TraitDef;
pub use self::typeck_results::{
- CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
- GeneratorDiagnosticData, GeneratorInteriorTypeCause, TypeckResults, UserType,
- UserTypeAnnotationIndex,
+ CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, TypeckResults,
+ UserType, UserTypeAnnotationIndex,
};
pub mod _match;
@@ -162,8 +161,6 @@ pub struct ResolverOutputs {
#[derive(Debug)]
pub struct ResolverGlobalCtxt {
pub visibilities: FxHashMap<LocalDefId, Visibility>,
- /// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error.
- pub has_pub_restricted: bool,
/// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`.
pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
pub effective_visibilities: EffectiveVisibilities,
@@ -238,7 +235,7 @@ pub struct ImplHeader<'tcx> {
pub impl_def_id: DefId,
pub self_ty: Ty<'tcx>,
pub trait_ref: Option<TraitRef<'tcx>>,
- pub predicates: Vec<(Predicate<'tcx>, Span)>,
+ pub predicates: Vec<Predicate<'tcx>>,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)]
@@ -282,6 +279,19 @@ impl fmt::Display for ImplPolarity {
}
}
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable, Debug)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum Asyncness {
+ Yes,
+ No,
+}
+
+impl Asyncness {
+ pub fn is_async(self) -> bool {
+ matches!(self, Asyncness::Yes)
+ }
+}
+
#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Encodable, Decodable, HashStable)]
pub enum Visibility<Id = LocalDefId> {
/// Visible everywhere (including in other crates).
@@ -568,6 +578,11 @@ impl rustc_errors::IntoDiagnosticArg for Clause<'_> {
pub struct Clause<'tcx>(Interned<'tcx, WithCachedTypeInfo<ty::Binder<'tcx, PredicateKind<'tcx>>>>);
impl<'tcx> Clause<'tcx> {
+ pub fn from_projection_clause(tcx: TyCtxt<'tcx>, pred: PolyProjectionPredicate<'tcx>) -> Self {
+ let pred: Predicate<'tcx> = pred.to_predicate(tcx);
+ pred.expect_clause()
+ }
+
pub fn as_predicate(self) -> Predicate<'tcx> {
Predicate(self.0)
}
@@ -1255,14 +1270,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for TraitRef<'tcx> {
}
}
-impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for TraitPredicate<'tcx> {
- #[inline(always)]
- fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
- let p: Predicate<'tcx> = self.to_predicate(tcx);
- p.expect_clause()
- }
-}
-
impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, TraitRef<'tcx>> {
#[inline(always)]
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
@@ -1289,18 +1296,6 @@ impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for Binder<'tcx, TraitRef
}
}
-impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for TraitRef<'tcx> {
- fn to_predicate(self, tcx: TyCtxt<'tcx>) -> PolyTraitPredicate<'tcx> {
- ty::Binder::dummy(self).to_predicate(tcx)
- }
-}
-
-impl<'tcx> ToPredicate<'tcx, PolyTraitPredicate<'tcx>> for TraitPredicate<'tcx> {
- fn to_predicate(self, _tcx: TyCtxt<'tcx>) -> PolyTraitPredicate<'tcx> {
- ty::Binder::dummy(self)
- }
-}
-
impl<'tcx> ToPredicate<'tcx> for PolyTraitPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::Trait(p))).to_predicate(tcx)
@@ -1314,12 +1309,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyTraitPredicate<'tcx> {
}
}
-impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>> {
- fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
- ty::Binder::dummy(PredicateKind::Clause(ClauseKind::RegionOutlives(self))).to_predicate(tcx)
- }
-}
-
impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
self.map_bound(|p| PredicateKind::Clause(ClauseKind::RegionOutlives(p))).to_predicate(tcx)
@@ -1332,12 +1321,6 @@ impl<'tcx> ToPredicate<'tcx> for OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>> {
}
}
-impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
- fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
- self.map_bound(|p| PredicateKind::Clause(ClauseKind::TypeOutlives(p))).to_predicate(tcx)
- }
-}
-
impl<'tcx> ToPredicate<'tcx> for ProjectionPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
ty::Binder::dummy(PredicateKind::Clause(ClauseKind::Projection(self))).to_predicate(tcx)
@@ -1357,13 +1340,6 @@ impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for ProjectionPredicate<'tcx> {
}
}
-impl<'tcx> ToPredicate<'tcx, Clause<'tcx>> for PolyProjectionPredicate<'tcx> {
- fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Clause<'tcx> {
- let p: Predicate<'tcx> = self.to_predicate(tcx);
- p.expect_clause()
- }
-}
-
impl<'tcx> ToPredicate<'tcx> for TraitPredicate<'tcx> {
fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
PredicateKind::Clause(ClauseKind::Trait(self)).to_predicate(tcx)
@@ -1512,7 +1488,7 @@ impl<'a, 'tcx> IntoIterator for &'a InstantiatedPredicates<'tcx> {
}
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable, Lift)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
#[derive(TypeFoldable, TypeVisitable)]
pub struct OpaqueTypeKey<'tcx> {
pub def_id: LocalDefId,
@@ -1795,7 +1771,7 @@ impl<'tcx> ParamEnv<'tcx> {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
-#[derive(HashStable, Lift)]
+#[derive(HashStable)]
pub struct ParamEnvAnd<'tcx, T> {
pub param_env: ParamEnv<'tcx>,
pub value: T,
@@ -2150,6 +2126,7 @@ impl<'tcx> TyCtxt<'tcx> {
for attr in self.get_attrs(did, sym::repr) {
for r in attr::parse_repr_attr(&self.sess, attr) {
flags.insert(match r {
+ attr::ReprRust => ReprFlags::empty(),
attr::ReprC => ReprFlags::IS_C,
attr::ReprPacked(pack) => {
let pack = Align::from_bytes(pack as u64).unwrap();
@@ -2215,10 +2192,6 @@ impl<'tcx> TyCtxt<'tcx> {
// The name of a constructor is that of its parent.
rustc_hir::definitions::DefPathData::Ctor => self
.opt_item_name(DefId { krate: def_id.krate, index: def_key.parent.unwrap() }),
- // The name of opaque types only exists in HIR.
- rustc_hir::definitions::DefPathData::ImplTrait
- if let Some(def_id) = def_id.as_local() =>
- self.hir().opt_name(self.hir().local_def_id_to_hir_id(def_id)),
_ => def_key.get_opt_name(),
}
}
@@ -2409,6 +2382,22 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
+ pub fn get_attrs_by_path<'attr>(
+ self,
+ did: DefId,
+ attr: &'attr [Symbol],
+ ) -> impl Iterator<Item = &'tcx ast::Attribute> + 'attr
+ where
+ 'tcx: 'attr,
+ {
+ let filter_fn = move |a: &&ast::Attribute| a.path_matches(&attr);
+ if let Some(did) = did.as_local() {
+ self.hir().attrs(self.hir().local_def_id_to_hir_id(did)).iter().filter(filter_fn)
+ } else {
+ self.item_attrs(did).iter().filter(filter_fn)
+ }
+ }
+
pub fn get_attr(self, did: impl Into<DefId>, attr: Symbol) -> Option<&'tcx ast::Attribute> {
if cfg!(debug_assertions) && !rustc_feature::is_valid_for_get_attr(attr) {
let did: DefId = did.into();
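The new `get_attrs_by_path` added above looks up attributes addressed by a multi-segment path, reading local HIR attrs or extern `item_attrs` as appropriate. A minimal sketch of a call site, assuming a `TyCtxt` and a slice of path `Symbol`s; the helper name `has_attr_with_path` is illustrative.

use rustc_hir::def_id::DefId;
use rustc_middle::ty::TyCtxt;
use rustc_span::Symbol;

// Hypothetical helper: check whether `did` carries an attribute whose path
// matches `path` (e.g. a two-segment tool attribute).
fn has_attr_with_path(tcx: TyCtxt<'_>, did: DefId, path: &[Symbol]) -> bool {
    tcx.get_attrs_by_path(did, path).next().is_some()
}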
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
index 2415d50b2..fd125af20 100644
--- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -134,8 +134,9 @@ impl<'tcx> TyCtxt<'tcx> {
/// in-scope substitutions and then normalizing any associated
/// types.
/// Panics if normalization fails. In case normalization might fail
- /// use `try_subst_and_normalize_erasing_regions` instead.
- pub fn subst_and_normalize_erasing_regions<T>(
+ /// use `try_instantiate_and_normalize_erasing_regions` instead.
+ #[instrument(level = "debug", skip(self))]
+ pub fn instantiate_and_normalize_erasing_regions<T>(
self,
param_args: GenericArgsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -144,22 +145,16 @@ impl<'tcx> TyCtxt<'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>>,
{
- debug!(
- "subst_and_normalize_erasing_regions(\
- param_args={:?}, \
- value={:?}, \
- param_env={:?})",
- param_args, value, param_env,
- );
let substituted = value.instantiate(self, param_args);
self.normalize_erasing_regions(param_env, substituted)
}
/// Monomorphizes a type from the AST by first applying the
/// in-scope substitutions and then trying to normalize any associated
- /// types. Contrary to `subst_and_normalize_erasing_regions` this does
+ /// types. Contrary to `instantiate_and_normalize_erasing_regions` this does
/// not assume that normalization succeeds.
- pub fn try_subst_and_normalize_erasing_regions<T>(
+ #[instrument(level = "debug", skip(self))]
+ pub fn try_instantiate_and_normalize_erasing_regions<T>(
self,
param_args: GenericArgsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
@@ -168,13 +163,6 @@ impl<'tcx> TyCtxt<'tcx> {
where
T: TypeFoldable<TyCtxt<'tcx>>,
{
- debug!(
- "subst_and_normalize_erasing_regions(\
- param_args={:?}, \
- value={:?}, \
- param_env={:?})",
- param_args, value, param_env,
- );
let substituted = value.instantiate(self, param_args);
self.try_normalize_erasing_regions(param_env, substituted)
}
diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs
index 0ff5ac903..6491936c2 100644
--- a/compiler/rustc_middle/src/ty/opaque_types.rs
+++ b/compiler/rustc_middle/src/ty/opaque_types.rs
@@ -157,9 +157,9 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for ReverseMapper<'tcx> {
Ty::new_generator(self.tcx, def_id, args, movability)
}
- ty::GeneratorWitnessMIR(def_id, args) => {
+ ty::GeneratorWitness(def_id, args) => {
let args = self.fold_closure_args(def_id, args);
- Ty::new_generator_witness_mir(self.tcx, def_id, args)
+ Ty::new_generator_witness(self.tcx, def_id, args)
}
ty::Param(param) => {
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
index f1c389842..9aa673e44 100644
--- a/compiler/rustc_middle/src/ty/parameterized.rs
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -62,6 +62,7 @@ trivially_parameterized_over_tcx! {
crate::middle::resolve_bound_vars::ObjectLifetimeDefault,
crate::mir::ConstQualifs,
ty::AssocItemContainer,
+ ty::Asyncness,
ty::DeducedParamAttrs,
ty::Generics,
ty::ImplPolarity,
@@ -131,5 +132,4 @@ parameterized_over_tcx! {
ty::Predicate,
ty::Clause,
ty::ClauseKind,
- ty::GeneratorDiagnosticData,
}
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index 05871d0bc..aa8e2e307 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -271,7 +271,7 @@ fn characteristic_def_id_of_type_cached<'a>(
ty::FnDef(def_id, _)
| ty::Closure(def_id, _)
| ty::Generator(def_id, _, _)
- | ty::GeneratorWitnessMIR(def_id, _)
+ | ty::GeneratorWitness(def_id, _)
| ty::Foreign(def_id) => Some(def_id),
ty::Bool
@@ -286,7 +286,6 @@ fn characteristic_def_id_of_type_cached<'a>(
| ty::Infer(_)
| ty::Bound(..)
| ty::Error(_)
- | ty::GeneratorWitness(..)
| ty::Never
| ty::Float(_) => None,
}
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index ac0c88468..2d7350387 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -136,10 +136,8 @@ define_helper!(
///
/// Regions not selected by the region highlight mode are presently
/// unaffected.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Default)]
pub struct RegionHighlightMode<'tcx> {
- tcx: TyCtxt<'tcx>,
-
/// If enabled, when we see the selected region, use "`'N`"
/// instead of the ordinary behavior.
highlight_regions: [Option<(ty::Region<'tcx>, usize)>; 3],
@@ -155,14 +153,6 @@ pub struct RegionHighlightMode<'tcx> {
}
impl<'tcx> RegionHighlightMode<'tcx> {
- pub fn new(tcx: TyCtxt<'tcx>) -> Self {
- Self {
- tcx,
- highlight_regions: Default::default(),
- highlight_bound_region: Default::default(),
- }
- }
-
/// If `region` and `number` are both `Some`, invokes
/// `highlighting_region`.
pub fn maybe_highlighting_region(
@@ -188,8 +178,13 @@ impl<'tcx> RegionHighlightMode<'tcx> {
}
/// Convenience wrapper for `highlighting_region`.
- pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) {
- self.highlighting_region(ty::Region::new_var(self.tcx, vid), number)
+ pub fn highlighting_region_vid(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ vid: ty::RegionVid,
+ number: usize,
+ ) {
+ self.highlighting_region(ty::Region::new_var(tcx, vid), number)
}
/// Returns `Some(n)` with the number to use for the given region, if any.
@@ -365,7 +360,7 @@ pub trait PrettyPrinter<'tcx>:
self.write_str(get_local_name(&self, symbol, parent, parent_key).as_str())?;
self.write_str("::")?;
} else if let DefKind::Struct | DefKind::Union | DefKind::Enum | DefKind::Trait
- | DefKind::TyAlias { .. } | DefKind::Fn | DefKind::Const | DefKind::Static(_) = kind
+ | DefKind::TyAlias | DefKind::Fn | DefKind::Const | DefKind::Static(_) = kind
{
} else {
// If not covered above, like for example items out of `impl` blocks, fallback.
@@ -760,15 +755,18 @@ pub trait PrettyPrinter<'tcx>:
// only affect certain debug messages (e.g. messages printed
// from `rustc_middle::ty` during the computation of `tcx.predicates_of`),
// and should have no effect on any compiler output.
+ // [Unless `-Zverbose` is used, as in the output of
+ // `tests/ui/nll/ty-outlives/impl-trait-captures.rs`, for
+ // example.]
if self.should_print_verbose() {
// FIXME(eddyb) print this with `print_def_path`.
- p!(write("Opaque({:?}, {:?})", def_id, args));
+ p!(write("Opaque({:?}, {})", def_id, args.print_as_list()));
return Ok(self);
}
let parent = self.tcx().parent(def_id);
match self.tcx().def_kind(parent) {
- DefKind::TyAlias { .. } | DefKind::AssocTy => {
+ DefKind::TyAlias | DefKind::AssocTy => {
// NOTE: I know we should check for NO_QUERIES here, but it's alright.
// `type_of` on a type alias or assoc type should never cause a cycle.
if let ty::Alias(ty::Opaque, ty::AliasTy { def_id: d, .. }) =
@@ -797,7 +795,7 @@ pub trait PrettyPrinter<'tcx>:
}
ty::Str => p!("str"),
ty::Generator(did, args, movability) => {
- p!(write("["));
+ p!(write("{{"));
let generator_kind = self.tcx().generator_kind(did).unwrap();
let should_print_movability =
self.should_print_verbose() || generator_kind == hir::GeneratorKind::Gen;
@@ -838,13 +836,10 @@ pub trait PrettyPrinter<'tcx>:
}
}
- p!("]")
- }
- ty::GeneratorWitness(types) => {
- p!(in_binder(&types));
+ p!("}}")
}
- ty::GeneratorWitnessMIR(did, args) => {
- p!(write("["));
+ ty::GeneratorWitness(did, args) => {
+ p!(write("{{"));
if !self.tcx().sess.verbose() {
p!("generator witness");
// FIXME(eddyb) should use `def_span`.
@@ -863,10 +858,10 @@ pub trait PrettyPrinter<'tcx>:
p!(print_def_path(did, args));
}
- p!("]")
+ p!("}}")
}
ty::Closure(did, args) => {
- p!(write("["));
+ p!(write("{{"));
if !self.should_print_verbose() {
p!(write("closure"));
// FIXME(eddyb) should use `def_span`.
@@ -894,7 +889,7 @@ pub trait PrettyPrinter<'tcx>:
p!(print_def_path(did, args));
if !args.as_closure().is_valid() {
p!(" closure_args=(unavailable)");
- p!(write(" args={:?}", args));
+ p!(write(" args={}", args.print_as_list()));
} else {
p!(" closure_kind_ty=", print(args.as_closure().kind_ty()));
p!(
@@ -906,7 +901,7 @@ pub trait PrettyPrinter<'tcx>:
p!(")");
}
}
- p!("]");
+ p!("}}");
}
ty::Array(ty, sz) => p!("[", print(ty), "; ", print(sz), "]"),
ty::Slice(ty) => p!("[", print(ty), "]"),
@@ -1063,7 +1058,7 @@ pub trait PrettyPrinter<'tcx>:
}
for (assoc_item_def_id, term) in assoc_items {
- // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks,
+ // Skip printing `<{generator@} as Generator<_>>::Return` from async blocks,
// unless we can find out what generator return type it comes from.
let term = if let Some(ty) = term.skip_binder().ty()
&& let ty::Alias(ty::Projection, proj) = ty.kind()
@@ -1123,6 +1118,17 @@ pub trait PrettyPrinter<'tcx>:
}
}
+ if self.tcx().features().return_type_notation
+ && let Some(ty::ImplTraitInTraitData::Trait { fn_def_id, .. }) = self.tcx().opt_rpitit_info(def_id)
+ && let ty::Alias(_, alias_ty) = self.tcx().fn_sig(fn_def_id).skip_binder().output().skip_binder().kind()
+ && alias_ty.def_id == def_id
+ {
+ let num_args = self.tcx().generics_of(fn_def_id).count();
+ write!(self, " {{ ")?;
+ self = self.print_def_path(fn_def_id, &args[..num_args])?;
+ write!(self, "() }}")?;
+ }
+
Ok(self)
}
@@ -1239,21 +1245,18 @@ pub trait PrettyPrinter<'tcx>:
.generics_of(principal.def_id)
.own_args_no_defaults(cx.tcx(), principal.args);
- let mut projections = predicates.projection_bounds();
-
- let mut args = args.iter().cloned();
- let arg0 = args.next();
- let projection0 = projections.next();
- if arg0.is_some() || projection0.is_some() {
- let args = arg0.into_iter().chain(args);
- let projections = projection0.into_iter().chain(projections);
+ let mut projections: Vec<_> = predicates.projection_bounds().collect();
+ projections.sort_by_cached_key(|proj| {
+ cx.tcx().item_name(proj.item_def_id()).to_string()
+ });
+ if !args.is_empty() || !projections.is_empty() {
p!(generic_delimiters(|mut cx| {
- cx = cx.comma_sep(args)?;
- if arg0.is_some() && projection0.is_some() {
+ cx = cx.comma_sep(args.iter().copied())?;
+ if !args.is_empty() && !projections.is_empty() {
write!(cx, ", ")?;
}
- cx.comma_sep(projections)
+ cx.comma_sep(projections.iter().copied())
}));
}
}
@@ -1707,6 +1710,21 @@ pub trait PrettyPrinter<'tcx>:
}
}
+pub(crate) fn pretty_print_const<'tcx>(
+ c: ty::Const<'tcx>,
+ fmt: &mut fmt::Formatter<'_>,
+ print_types: bool,
+) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let literal = tcx.lift(c).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.pretty_print_const(literal, print_types)?;
+ fmt.write_str(&cx.into_buffer())?;
+ Ok(())
+ })
+}
+
// HACK(eddyb) boxed to avoid moving around a large struct by-value.
pub struct FmtPrinter<'a, 'tcx>(Box<FmtPrinterData<'a, 'tcx>>);
@@ -1767,7 +1785,7 @@ impl<'a, 'tcx> FmtPrinter<'a, 'tcx> {
printed_type_count: 0,
type_length_limit,
truncated: false,
- region_highlight_mode: RegionHighlightMode::new(tcx),
+ region_highlight_mode: RegionHighlightMode::default(),
ty_infer_name_resolver: None,
const_infer_name_resolver: None,
}))
@@ -2312,7 +2330,7 @@ impl<'a, 'tcx> ty::TypeFolder<TyCtxt<'tcx>> for RegionFolder<'a, 'tcx> {
// If this is an anonymous placeholder, don't rename. Otherwise, in some
// async fns, we get a `for<'r> Send` bound
match kind {
- ty::BrAnon(..) | ty::BrEnv => r,
+ ty::BrAnon | ty::BrEnv => r,
_ => {
// Index doesn't matter, since this is just for naming and these never get bound
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
@@ -2433,7 +2451,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
binder_level_idx: ty::DebruijnIndex,
br: ty::BoundRegion| {
let (name, kind) = match br.kind {
- ty::BrAnon(..) | ty::BrEnv => {
+ ty::BrAnon | ty::BrEnv => {
let name = next_name(&self);
if let Some(lt_idx) = lifetime_idx {
@@ -2735,20 +2753,14 @@ forward_display_to_print! {
// HACK(eddyb) these are exhaustive instead of generic,
// because `for<'tcx>` isn't possible yet.
- ty::PolyExistentialPredicate<'tcx>,
ty::PolyExistentialProjection<'tcx>,
ty::PolyExistentialTraitRef<'tcx>,
ty::Binder<'tcx, ty::TraitRef<'tcx>>,
ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>,
- ty::Binder<'tcx, TraitRefPrintOnlyTraitName<'tcx>>,
ty::Binder<'tcx, ty::FnSig<'tcx>>,
ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>>,
- ty::Binder<'tcx, ty::SubtypePredicate<'tcx>>,
ty::Binder<'tcx, ty::ProjectionPredicate<'tcx>>,
- ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>,
- ty::Binder<'tcx, ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>>,
-
ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>,
ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>
}
@@ -2986,7 +2998,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
match child.res {
def::Res::Def(DefKind::AssocTy, _) => {}
- def::Res::Def(DefKind::TyAlias { .. }, _) => {}
+ def::Res::Def(DefKind::TyAlias, _) => {}
def::Res::Def(defkind, def_id) => {
if let Some(ns) = defkind.ns() {
collect_fn(&child.ident, ns, def_id);
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index 47512d350..e9d763afa 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -453,24 +453,14 @@ pub fn structurally_relate_tys<'tcx, R: TypeRelation<'tcx>>(
Ok(Ty::new_generator(tcx, a_id, args, movability))
}
- (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
- // Wrap our types with a temporary GeneratorWitness struct
- // inside the binder so we can related them
- let a_types = a_types.map_bound(GeneratorWitness);
- let b_types = b_types.map_bound(GeneratorWitness);
- // Then remove the GeneratorWitness for the result
- let types = relation.relate(a_types, b_types)?.map_bound(|witness| witness.0);
- Ok(Ty::new_generator_witness(tcx, types))
- }
-
- (&ty::GeneratorWitnessMIR(a_id, a_args), &ty::GeneratorWitnessMIR(b_id, b_args))
+ (&ty::GeneratorWitness(a_id, a_args), &ty::GeneratorWitness(b_id, b_args))
if a_id == b_id =>
{
// All GeneratorWitness types with the same id represent
// the (anonymous) type of the same generator expression. So
// all of their regions should be equated.
let args = relation.relate(a_args, b_args)?;
- Ok(Ty::new_generator_witness_mir(tcx, a_id, args))
+ Ok(Ty::new_generator_witness(tcx, a_id, args))
}
(&ty::Closure(a_id, a_args), &ty::Closure(b_id, b_args)) if a_id == b_id => {
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index f979ddd00..2adbe9e03 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -9,15 +9,13 @@ use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer};
use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
use crate::ty::{self, AliasTy, InferConst, Lift, Term, TermKind, Ty, TyCtxt};
use rustc_hir::def::Namespace;
-use rustc_index::{Idx, IndexVec};
use rustc_target::abi::TyAndLayout;
use rustc_type_ir::{ConstKind, DebugWithInfcx, InferCtxtLike, OptWithInfcx};
use std::fmt::{self, Debug};
use std::ops::ControlFlow;
-use std::rc::Rc;
-use std::sync::Arc;
+use super::print::PrettyPrinter;
use super::{GenericArg, GenericArgKind, Region};
impl fmt::Debug for ty::TraitDef {
@@ -70,7 +68,7 @@ impl<'tcx> fmt::Debug for ty::adjustment::Adjustment<'tcx> {
impl fmt::Debug for ty::BoundRegionKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
- ty::BrAnon(span) => write!(f, "BrAnon({span:?})"),
+ ty::BrAnon => write!(f, "BrAnon"),
ty::BrNamed(did, name) => {
if did.is_crate_root() {
write!(f, "BrNamed({name})")
@@ -138,6 +136,12 @@ impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> {
}
}
+impl fmt::Debug for ty::EffectVid<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "?{}e", self.index)
+ }
+}
+
impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
with_no_trimmed_paths!(fmt::Display::fmt(self, f))
@@ -154,7 +158,7 @@ impl<'tcx> ty::DebugWithInfcx<TyCtxt<'tcx>> for Ty<'tcx> {
}
impl<'tcx> fmt::Debug for Ty<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ with_no_trimmed_paths!(fmt::Debug::fmt(self.kind(), f))
}
}
@@ -253,6 +257,7 @@ impl<'tcx> fmt::Debug for ty::InferConst<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InferConst::Var(var) => write!(f, "{var:?}"),
+ InferConst::EffectVar(var) => write!(f, "{var:?}"),
InferConst::Fresh(var) => write!(f, "Fresh({var:?})"),
}
}
@@ -267,6 +272,7 @@ impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::InferConst<'tcx> {
None => write!(f, "{:?}", this.data),
Some(universe) => match *this.data {
Var(vid) => write!(f, "?{}_{}c", vid.index, universe.index()),
+ EffectVar(vid) => write!(f, "?{}_{}e", vid.index, universe.index()),
Fresh(_) => {
unreachable!()
}
@@ -335,14 +341,27 @@ impl<'tcx> DebugWithInfcx<TyCtxt<'tcx>> for ty::Const<'tcx> {
this: OptWithInfcx<'_, TyCtxt<'tcx>, InfCtx, &Self>,
f: &mut core::fmt::Formatter<'_>,
) -> core::fmt::Result {
- // This reflects what `Const` looked liked before `Interned` was
- // introduced. We print it like this to avoid having to update expected
- // output in a lot of tests.
+ // If this is a value, we spend some effort to make it look nice.
+ if let ConstKind::Value(_) = this.data.kind() {
+ return ty::tls::with(move |tcx| {
+ // Somehow trying to lift the valtree results in lifetime errors, so we lift the
+ // entire constant.
+ let lifted = tcx.lift(*this.data).unwrap();
+ let ConstKind::Value(valtree) = lifted.kind() else {
+ bug!("we checked that this is a valtree")
+ };
+ let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ let cx =
+ cx.pretty_print_const_valtree(valtree, lifted.ty(), /*print_ty*/ true)?;
+ f.write_str(&cx.into_buffer())
+ });
+ }
+ // Fall back to something verbose.
write!(
f,
- "Const {{ ty: {:?}, kind: {:?} }}",
- &this.map(|data| data.ty()),
- &this.map(|data| data.kind())
+ "{kind:?}: {ty:?}",
+ ty = &this.map(|data| data.ty()),
+ kind = &this.map(|data| data.kind())
)
}
}
@@ -434,22 +453,17 @@ impl<'tcx, T: DebugWithInfcx<TyCtxt<'tcx>>> DebugWithInfcx<TyCtxt<'tcx>> for ty:
// For things for which the type library provides traversal implementations
// for all Interners, we only need to provide a Lift implementation:
-CloneLiftImpls! {
- (),
- bool,
- usize,
- u16,
- u32,
- u64,
- String,
- rustc_type_ir::DebruijnIndex,
-}
-
-// For things about which the type library does not know, or does not
-// provide any traversal implementations, we need to provide both a Lift
-// implementation and traversal implementations (the latter only for
-// TyCtxt<'_> interners).
-TrivialTypeTraversalAndLiftImpls! {
+TrivialLiftImpls! {
+ (),
+ bool,
+ usize,
+ u64,
+}
+
+// For some things about which the type library does not know, or does not
+// provide any traversal implementations, we need to provide a traversal
+// implementation (only for TyCtxt<'_> interners).
+TrivialTypeTraversalImpls! {
::rustc_target::abi::FieldIdx,
::rustc_target::abi::VariantIdx,
crate::middle::region::Scope,
@@ -459,17 +473,12 @@ TrivialTypeTraversalAndLiftImpls! {
::rustc_ast::NodeId,
::rustc_span::symbol::Symbol,
::rustc_hir::def::Res,
- ::rustc_hir::def_id::DefId,
::rustc_hir::def_id::LocalDefId,
::rustc_hir::HirId,
::rustc_hir::MatchSource,
- ::rustc_hir::Mutability,
- ::rustc_hir::Unsafety,
::rustc_target::asm::InlineAsmRegOrRegClass,
- ::rustc_target::spec::abi::Abi,
crate::mir::coverage::CounterId,
crate::mir::coverage::ExpressionId,
- crate::mir::coverage::MappedExpressionIndex,
crate::mir::Local,
crate::mir::Promoted,
crate::traits::Reveal,
@@ -484,16 +493,12 @@ TrivialTypeTraversalAndLiftImpls! {
crate::ty::AssocItem,
crate::ty::AssocKind,
crate::ty::AliasKind,
- crate::ty::AliasRelationDirection,
crate::ty::Placeholder<crate::ty::BoundRegion>,
crate::ty::Placeholder<crate::ty::BoundTy>,
crate::ty::Placeholder<ty::BoundVar>,
- crate::ty::ClosureKind,
crate::ty::FreeRegion,
crate::ty::InferTy,
crate::ty::IntVarValue,
- crate::ty::ParamConst,
- crate::ty::ParamTy,
crate::ty::adjustment::PointerCoercion,
crate::ty::RegionVid,
crate::ty::UniverseIndex,
@@ -501,32 +506,30 @@ TrivialTypeTraversalAndLiftImpls! {
::rustc_span::Span,
::rustc_span::symbol::Ident,
::rustc_errors::ErrorGuaranteed,
- interpret::Scalar,
- rustc_target::abi::Size,
ty::BoundVar,
+ ty::ValTree<'tcx>,
}
-
+// For some things about which the type library does not know, or does not
+// provide any traversal implementations, we need to provide a traversal
+// implementation and a lift implementation (the former only for TyCtxt<'_>
+// interners).
TrivialTypeTraversalAndLiftImpls! {
- ty::ValTree<'tcx>,
+ ::rustc_hir::def_id::DefId,
+ ::rustc_hir::Mutability,
+ ::rustc_hir::Unsafety,
+ ::rustc_target::spec::abi::Abi,
+ crate::ty::AliasRelationDirection,
+ crate::ty::ClosureKind,
+ crate::ty::ParamConst,
+ crate::ty::ParamTy,
+ interpret::Scalar,
+ interpret::AllocId,
+ rustc_target::abi::Size,
}
///////////////////////////////////////////////////////////////////////////
// Lift implementations
-impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
- type Lifted = (A::Lifted, B::Lifted);
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some((tcx.lift(self.0)?, tcx.lift(self.1)?))
- }
-}
-
-impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) {
- type Lifted = (A::Lifted, B::Lifted, C::Lifted);
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some((tcx.lift(self.0)?, tcx.lift(self.1)?, tcx.lift(self.2)?))
- }
-}
-
impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
type Lifted = Option<T::Lifted>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
@@ -537,50 +540,6 @@ impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
}
}
-impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
- type Lifted = Result<T::Lifted, E::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- match self {
- Ok(x) => tcx.lift(x).map(Ok),
- Err(e) => tcx.lift(e).map(Err),
- }
- }
-}
-
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
- type Lifted = Box<T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some(Box::new(tcx.lift(*self)?))
- }
-}
-
-impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> {
- type Lifted = Rc<T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some(Rc::new(tcx.lift(self.as_ref().clone())?))
- }
-}
-
-impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> {
- type Lifted = Arc<T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- Some(Arc::new(tcx.lift(self.as_ref().clone())?))
- }
-}
-impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
- type Lifted = Vec<T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- self.into_iter().map(|v| tcx.lift(v)).collect()
- }
-}
-
-impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
- type Lifted = IndexVec<I, T::Lifted>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- self.into_iter().map(|e| tcx.lift(e)).collect()
- }
-}
-
impl<'a, 'tcx> Lift<'tcx> for Term<'a> {
type Lifted = ty::Term<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
@@ -593,13 +552,6 @@ impl<'a, 'tcx> Lift<'tcx> for Term<'a> {
)
}
}
-impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
- type Lifted = ty::ParamEnv<'tcx>;
- fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
- tcx.lift(self.caller_bounds())
- .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal()))
- }
-}
///////////////////////////////////////////////////////////////////////////
// Traversal implementations.
@@ -705,9 +657,8 @@ impl<'tcx> TypeSuperFoldable<TyCtxt<'tcx>> for Ty<'tcx> {
ty::Generator(did, args, movability) => {
ty::Generator(did, args.try_fold_with(folder)?, movability)
}
- ty::GeneratorWitness(types) => ty::GeneratorWitness(types.try_fold_with(folder)?),
- ty::GeneratorWitnessMIR(did, args) => {
- ty::GeneratorWitnessMIR(did, args.try_fold_with(folder)?)
+ ty::GeneratorWitness(did, args) => {
+ ty::GeneratorWitness(did, args.try_fold_with(folder)?)
}
ty::Closure(did, args) => ty::Closure(did, args.try_fold_with(folder)?),
ty::Alias(kind, data) => ty::Alias(kind, data.try_fold_with(folder)?),
@@ -756,8 +707,7 @@ impl<'tcx> TypeSuperVisitable<TyCtxt<'tcx>> for Ty<'tcx> {
ty.visit_with(visitor)
}
ty::Generator(_did, ref args, _) => args.visit_with(visitor),
- ty::GeneratorWitness(ref types) => types.visit_with(visitor),
- ty::GeneratorWitnessMIR(_did, ref args) => args.visit_with(visitor),
+ ty::GeneratorWitness(_did, ref args) => args.visit_with(visitor),
ty::Closure(_did, ref args) => args.visit_with(visitor),
ty::Alias(_, ref data) => data.visit_with(visitor),
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 0291cdd6c..1e57392e0 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -69,7 +69,7 @@ pub struct FreeRegion {
#[derive(HashStable)]
pub enum BoundRegionKind {
/// An anonymous region parameter for a given fn (&T)
- BrAnon(Option<Span>),
+ BrAnon,
/// Named region parameters for functions (a in &'a T)
///
@@ -351,7 +351,7 @@ impl<'tcx> ClosureArgs<'tcx> {
}
/// Similar to `ClosureArgs`; see the above documentation for more.
-#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, TypeFoldable, TypeVisitable)]
pub struct GeneratorArgs<'tcx> {
pub args: GenericArgsRef<'tcx>,
}
@@ -725,7 +725,7 @@ impl<'tcx> PolyExistentialPredicate<'tcx> {
self.rebind(tr).with_self_ty(tcx, self_ty).to_predicate(tcx)
}
ExistentialPredicate::Projection(p) => {
- self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
+ ty::Clause::from_projection_clause(tcx, self.rebind(p.with_self_ty(tcx, self_ty)))
}
ExistentialPredicate::AutoTrait(did) => {
let generics = tcx.generics_of(did);
@@ -1223,7 +1223,7 @@ impl<'tcx> AliasTy<'tcx> {
DefKind::AssocTy if let DefKind::Impl { of_trait: false } = tcx.def_kind(tcx.parent(self.def_id)) => ty::Inherent,
DefKind::AssocTy => ty::Projection,
DefKind::OpaqueTy => ty::Opaque,
- DefKind::TyAlias { .. } => ty::Weak,
+ DefKind::TyAlias => ty::Weak,
kind => bug!("unexpected DefKind in AliasTy: {kind:?}"),
}
}
@@ -1305,7 +1305,7 @@ impl<'tcx> AliasTy<'tcx> {
}
}
-#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
pub struct GenSig<'tcx> {
pub resume_ty: Ty<'tcx>,
pub yield_ty: Ty<'tcx>,
@@ -1465,7 +1465,7 @@ impl<'tcx> Region<'tcx> {
bound_region: ty::BoundRegion,
) -> Region<'tcx> {
// Use a pre-interned one when possible.
- if let ty::BoundRegion { var, kind: ty::BrAnon(None) } = bound_region
+ if let ty::BoundRegion { var, kind: ty::BrAnon } = bound_region
&& let Some(inner) = tcx.lifetimes.re_late_bounds.get(debruijn.as_usize())
&& let Some(re) = inner.get(var.as_usize()).copied()
{
@@ -1577,6 +1577,20 @@ pub struct ConstVid<'tcx> {
pub phantom: PhantomData<&'tcx ()>,
}
+/// An **effect** **v**ariable **ID**.
+///
+/// Handling effect infer variables happens separately from const infer variables
+/// because we do not want to reuse any of the const infer machinery. If we try to
+/// relate an effect variable with a normal one, we will ICE, which can catch bugs
+/// where we are not correctly using the effect var for an effect param. Fallback
+/// is also implemented on top of having separate effect and normal const variables.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable)]
+pub struct EffectVid<'tcx> {
+ pub index: u32,
+ pub phantom: PhantomData<&'tcx ()>,
+}
+
rustc_index::newtype_index! {
/// A **region** (lifetime) **v**ariable **ID**.
#[derive(HashStable)]
@@ -1945,7 +1959,7 @@ impl<'tcx> Ty<'tcx> {
(kind, tcx.def_kind(alias_ty.def_id)),
(ty::Opaque, DefKind::OpaqueTy)
| (ty::Projection | ty::Inherent, DefKind::AssocTy)
- | (ty::Weak, DefKind::TyAlias { .. })
+ | (ty::Weak, DefKind::TyAlias)
);
Ty::new(tcx, Alias(kind, alias_ty))
}
@@ -2151,18 +2165,10 @@ impl<'tcx> Ty<'tcx> {
#[inline]
pub fn new_generator_witness(
tcx: TyCtxt<'tcx>,
- types: ty::Binder<'tcx, &'tcx List<Ty<'tcx>>>,
- ) -> Ty<'tcx> {
- Ty::new(tcx, GeneratorWitness(types))
- }
-
- #[inline]
- pub fn new_generator_witness_mir(
- tcx: TyCtxt<'tcx>,
id: DefId,
args: GenericArgsRef<'tcx>,
) -> Ty<'tcx> {
- Ty::new(tcx, GeneratorWitnessMIR(id, args))
+ Ty::new(tcx, GeneratorWitness(id, args))
}
// misc
@@ -2536,7 +2542,7 @@ impl<'tcx> Ty<'tcx> {
/// Checks whether a type recursively contains any closure
///
- /// Example: `Option<[closure@file.rs:4:20]>` returns true
+ /// Example: `Option<{closure@file.rs:4:20}>` returns true
pub fn contains_closure(self) -> bool {
struct ContainsClosureVisitor;
@@ -2692,7 +2698,6 @@ impl<'tcx> Ty<'tcx> {
| ty::Dynamic(..)
| ty::Closure(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_)
@@ -2728,13 +2733,14 @@ impl<'tcx> Ty<'tcx> {
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
| ty::Error(_)
// Extern types have metadata = ().
| ty::Foreign(..)
+ // `dyn*` has no metadata
+ | ty::Dynamic(_, _, DynKind::DynStar)
// If returned by `struct_tail_without_normalization` this is a unit struct
// without any fields, or not a struct, and therefore is Sized.
| ty::Adt(..)
@@ -2743,7 +2749,7 @@ impl<'tcx> Ty<'tcx> {
| ty::Tuple(..) => (tcx.types.unit, false),
ty::Str | ty::Slice(_) => (tcx.types.usize, false),
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, DynKind::Dyn) => {
let dyn_metadata = tcx.require_lang_item(LangItem::DynMetadata, None);
(tcx.type_of(dyn_metadata).instantiate(tcx, &[tail.into()]), false)
},
@@ -2815,7 +2821,6 @@ impl<'tcx> Ty<'tcx> {
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
@@ -2857,7 +2862,7 @@ impl<'tcx> Ty<'tcx> {
| ty::Uint(..)
| ty::Float(..) => true,
- // The voldemort ZSTs are fine.
+ // ZSTs which can't be named are fine.
ty::FnDef(..) => true,
ty::Array(element_ty, _len) => element_ty.is_trivially_pure_clone_copy(),
@@ -2878,7 +2883,7 @@ impl<'tcx> Ty<'tcx> {
// anything with custom metadata it might be more complicated.
ty::Ref(_, _, hir::Mutability::Not) | ty::RawPtr(..) => false,
- ty::Generator(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => false,
+ ty::Generator(..) | ty::GeneratorWitness(..) => false,
// Might be, but not "trivial" so just giving the safe answer.
ty::Adt(..) | ty::Closure(..) => false,
@@ -2929,6 +2934,37 @@ impl<'tcx> Ty<'tcx> {
_ => false,
}
}
+
+ /// Returns `true` when the outermost type cannot be further normalized,
+ /// resolved, or substituted. This includes all primitive types, but also
+ /// things like ADTs and trait objects, since even if their arguments or
+ /// nested types may be further simplified, the outermost [`TyKind`] or
+ /// type constructor remains the same.
+ pub fn is_known_rigid(self) -> bool {
+ match self.kind() {
+ Bool
+ | Char
+ | Int(_)
+ | Uint(_)
+ | Float(_)
+ | Adt(_, _)
+ | Foreign(_)
+ | Str
+ | Array(_, _)
+ | Slice(_)
+ | RawPtr(_)
+ | Ref(_, _, _)
+ | FnDef(_, _)
+ | FnPtr(_)
+ | Dynamic(_, _, _)
+ | Closure(_, _)
+ | Generator(_, _, _)
+ | GeneratorWitness(..)
+ | Never
+ | Tuple(_) => true,
+ Error(_) | Infer(_) | Alias(_, _) | Param(_) | Bound(_, _) | Placeholder(_) => false,
+ }
+ }
}
/// Extra information about why we ended up with a particular variance.
@@ -2974,7 +3010,7 @@ mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
// tidy-alphabetical-start
- static_assert_size!(RegionKind<'_>, 28);
+ static_assert_size!(RegionKind<'_>, 24);
static_assert_size!(TyKind<'_>, 32);
// tidy-alphabetical-end
}
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index 6e55e7915..bf9b24493 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -90,6 +90,10 @@ pub struct TraitImpls {
}
impl TraitImpls {
+ pub fn is_empty(&self) -> bool {
+ self.blanket_impls.is_empty() && self.non_blanket_impls.is_empty()
+ }
+
pub fn blanket_impls(&self) -> &[DefId] {
self.blanket_impls.as_slice()
}
diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs
index 327cd0a5d..a44224e4d 100644
--- a/compiler/rustc_middle/src/ty/typeck_results.rs
+++ b/compiler/rustc_middle/src/ty/typeck_results.rs
@@ -165,7 +165,7 @@ pub struct TypeckResults<'tcx> {
/// reading places that are mentioned in a closure (because of _ patterns). However,
/// to ensure the places are initialized, we introduce fake reads.
/// Consider these two examples:
- /// ``` (discriminant matching with only wildcard arm)
+ /// ```ignore (discriminant matching with only wildcard arm)
/// let x: u8;
/// let c = || match x { _ => () };
/// ```
@@ -173,7 +173,7 @@ pub struct TypeckResults<'tcx> {
/// want to capture it. However, we do still want an error here, because `x` should have
/// to be initialized at the point where c is created. Therefore, we add a "fake read"
/// instead.
- /// ``` (destructured assignments)
+ /// ```ignore (destructured assignments)
/// let c = || {
/// let (t1, t2) = t;
/// }
@@ -189,10 +189,6 @@ pub struct TypeckResults<'tcx> {
/// Details may be found in `rustc_hir_analysis::check::rvalue_scopes`.
pub rvalue_scopes: RvalueScopes,
- /// Stores the type, expression, span and optional scope span of all types
- /// that are live across the yield of this generator (if a generator).
- pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
-
/// Stores the predicates that apply on generator witness types.
/// formatting modified file tests/ui/generator/retain-resume-ref.rs
pub generator_interior_predicates:
@@ -212,49 +208,6 @@ pub struct TypeckResults<'tcx> {
offset_of_data: ItemLocalMap<(Ty<'tcx>, Vec<FieldIdx>)>,
}
-/// Whenever a value may be live across a generator yield, the type of that value winds up in the
-/// `GeneratorInteriorTypeCause` struct. This struct adds additional information about such
-/// captured types that can be useful for diagnostics. In particular, it stores the span that
-/// caused a given type to be recorded, along with the scope that enclosed the value (which can
-/// be used to find the await that the value is live across).
-///
-/// For example:
-///
-/// ```ignore (pseudo-Rust)
-/// async move {
-/// let x: T = expr;
-/// foo.await
-/// ...
-/// }
-/// ```
-///
-/// Here, we would store the type `T`, the span of the value `x`, the "scope-span" for
-/// the scope that contains `x`, the expr `T` evaluated from, and the span of `foo.await`.
-#[derive(TyEncodable, TyDecodable, Clone, Debug, Eq, Hash, PartialEq, HashStable)]
-#[derive(TypeFoldable, TypeVisitable)]
-pub struct GeneratorInteriorTypeCause<'tcx> {
- /// Type of the captured binding.
- pub ty: Ty<'tcx>,
- /// Span of the binding that was captured.
- pub span: Span,
- /// Span of the scope of the captured binding.
- pub scope_span: Option<Span>,
- /// Span of `.await` or `yield` expression.
- pub yield_span: Span,
- /// Expr which the type evaluated from.
- pub expr: Option<hir::HirId>,
-}
-
-// This type holds diagnostic information on generators and async functions across crate boundaries
-// and is used to provide better error messages
-#[derive(TyEncodable, TyDecodable, Clone, Debug, HashStable)]
-pub struct GeneratorDiagnosticData<'tcx> {
- pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
- pub hir_owner: DefId,
- pub nodes_types: ItemLocalMap<Ty<'tcx>>,
- pub adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
-}
-
impl<'tcx> TypeckResults<'tcx> {
pub fn new(hir_owner: OwnerId) -> TypeckResults<'tcx> {
TypeckResults {
@@ -278,7 +231,6 @@ impl<'tcx> TypeckResults<'tcx> {
closure_min_captures: Default::default(),
closure_fake_reads: Default::default(),
rvalue_scopes: Default::default(),
- generator_interior_types: ty::Binder::dummy(Default::default()),
generator_interior_predicates: Default::default(),
treat_byte_string_as_slice: Default::default(),
closure_size_eval: Default::default(),
@@ -351,28 +303,6 @@ impl<'tcx> TypeckResults<'tcx> {
LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
}
- pub fn get_generator_diagnostic_data(&self) -> GeneratorDiagnosticData<'tcx> {
- let generator_interior_type = self.generator_interior_types.map_bound_ref(|vec| {
- vec.iter()
- .map(|item| {
- GeneratorInteriorTypeCause {
- ty: item.ty,
- span: item.span,
- scope_span: item.scope_span,
- yield_span: item.yield_span,
- expr: None, //FIXME: Passing expression over crate boundaries is impossible at the moment
- }
- })
- .collect::<Vec<_>>()
- });
- GeneratorDiagnosticData {
- generator_interior_types: generator_interior_type,
- hir_owner: self.hir_owner.to_def_id(),
- nodes_types: self.node_types.clone(),
- adjustments: self.adjustments.clone(),
- }
- }
-
pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
self.node_type_opt(id).unwrap_or_else(|| {
bug!("node_type: no type for node {}", tls::with(|tcx| tcx.hir().node_to_string(id)))
@@ -654,7 +584,7 @@ rustc_index::newtype_index! {
pub type CanonicalUserTypeAnnotations<'tcx> =
IndexVec<UserTypeAnnotationIndex, CanonicalUserTypeAnnotation<'tcx>>;
-#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
pub struct CanonicalUserTypeAnnotation<'tcx> {
pub user_ty: Box<CanonicalUserType<'tcx>>,
pub span: Span,
@@ -714,7 +644,7 @@ impl<'tcx> CanonicalUserType<'tcx> {
/// from constants that are named via paths, like `Foo::<A>::new` and
/// so forth.
#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable)]
-#[derive(Eq, Hash, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Eq, Hash, HashStable, TypeFoldable, TypeVisitable)]
pub enum UserType<'tcx> {
Ty(Ty<'tcx>),
@@ -722,3 +652,14 @@ pub enum UserType<'tcx> {
/// given substitutions applied.
TypeOf(DefId, UserArgs<'tcx>),
}
+
+impl<'tcx> std::fmt::Display for UserType<'tcx> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Ty(arg0) => {
+ ty::print::with_no_trimmed_paths!(write!(f, "Ty({})", arg0))
+ }
+ Self::TypeOf(arg0, arg1) => write!(f, "TypeOf({:?}, {:?})", arg0, arg1),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 564f982f8..964f38a65 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -156,7 +156,7 @@ impl<'tcx> TyCtxt<'tcx> {
| DefKind::Enum
| DefKind::Trait
| DefKind::OpaqueTy
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -855,7 +855,7 @@ impl<'tcx> OpaqueTypeExpander<'tcx> {
let hidden_ty = bty.instantiate(self.tcx, args);
self.fold_ty(hidden_ty);
}
- let expanded_ty = Ty::new_generator_witness_mir(self.tcx, def_id, args);
+ let expanded_ty = Ty::new_generator_witness(self.tcx, def_id, args);
self.expanded_cache.insert((def_id, args), expanded_ty);
expanded_ty
}
@@ -888,7 +888,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for OpaqueTypeExpander<'tcx> {
t
};
if self.expand_generators {
- if let ty::GeneratorWitnessMIR(def_id, args) = *t.kind() {
+ if let ty::GeneratorWitness(def_id, args) = *t.kind() {
t = self.expand_generator(def_id, args).unwrap_or(t);
}
}
@@ -1025,8 +1025,7 @@ impl<'tcx> Ty<'tcx> {
| ty::Dynamic(..)
| ty::Foreign(_)
| ty::Generator(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Infer(_)
| ty::Alias(..)
| ty::Param(_)
@@ -1065,8 +1064,7 @@ impl<'tcx> Ty<'tcx> {
| ty::Dynamic(..)
| ty::Foreign(_)
| ty::Generator(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Infer(_)
| ty::Alias(..)
| ty::Param(_)
@@ -1099,8 +1097,10 @@ impl<'tcx> Ty<'tcx> {
// This doesn't depend on regions, so try to minimize distinct
// query keys used.
// If normalization fails, we just use `query_ty`.
- let query_ty =
- tcx.try_normalize_erasing_regions(param_env, query_ty).unwrap_or(query_ty);
+ debug_assert!(!param_env.has_infer());
+ let query_ty = tcx
+ .try_normalize_erasing_regions(param_env, query_ty)
+ .unwrap_or_else(|_| tcx.erase_regions(query_ty));
tcx.needs_drop_raw(param_env.and(query_ty))
}
@@ -1194,10 +1194,7 @@ impl<'tcx> Ty<'tcx> {
false
}
- ty::Foreign(_)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
- | ty::Error(_) => false,
+ ty::Foreign(_) | ty::GeneratorWitness(..) | ty::Error(_) => false,
}
}
@@ -1292,8 +1289,6 @@ pub fn needs_drop_components<'tcx>(
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Char
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::RawPtr(_)
| ty::Ref(..)
| ty::Str => Ok(SmallVec::new()),
@@ -1333,7 +1328,8 @@ pub fn needs_drop_components<'tcx>(
| ty::Placeholder(..)
| ty::Infer(_)
| ty::Closure(..)
- | ty::Generator(..) => Ok(smallvec![ty]),
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..) => Ok(smallvec![ty]),
}
}
@@ -1364,11 +1360,7 @@ pub fn is_trivially_const_drop(ty: Ty<'_>) -> bool {
// Not trivial because they have components, and instead of looking inside,
// we'll just perform trait selection.
- ty::Closure(..)
- | ty::Generator(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
- | ty::Adt(..) => false,
+ ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(..) | ty::Adt(..) => false,
ty::Array(ty, _) | ty::Slice(ty) => is_trivially_const_drop(ty),
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
index 156eda477..95ba6c471 100644
--- a/compiler/rustc_middle/src/ty/visit.rs
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -33,14 +33,6 @@ pub trait TypeVisitableExt<'tcx>: TypeVisitable<TyCtxt<'tcx>> {
}
fn has_type_flags(&self, flags: TypeFlags) -> bool {
- // N.B. Even though this uses a visitor, the visitor does not actually
- // recurse through the whole `TypeVisitable` implementor type.
- //
- // Instead it stops on the first "level", visiting types, regions,
- // consts and predicates just fetches their type flags.
- //
- // Thus this is a lot faster than it might seem and should be
- // optimized to a simple field access.
let res =
self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags);
trace!(?self, ?flags, ?res, "has_type_flags");
@@ -485,11 +477,36 @@ impl std::fmt::Debug for HasTypeFlagsVisitor {
}
}
+// Note: this visitor traverses values down to the level of
+// `Ty`/`Const`/`Predicate`, but not within those types. This is because the
+// type flags at the outer layer are enough. So it's faster than it first
+// looks, particularly for `Ty`/`Predicate` where it's just a field access.
+//
+// N.B. The only case where this isn't totally true is binders, which also
+// add `HAS_{RE,TY,CT}_LATE_BOUND` flag depending on the *bound variables* that
+// are present, regardless of whether those bound variables are used. This
+// is important for anonymization of binders in `TyCtxt::erase_regions`. We
+// specifically detect this case in `visit_binder`.
impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for HasTypeFlagsVisitor {
type BreakTy = FoundFlags;
+ fn visit_binder<T: TypeVisitable<TyCtxt<'tcx>>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ // If we're looking for the HAS_BINDER_VARS flag, check if the
+ // binder has vars. This won't be present in the binder's bound
+ // value, so we need to check here too.
+ if self.flags.intersects(TypeFlags::HAS_BINDER_VARS) && !t.bound_vars().is_empty() {
+ return ControlFlow::Break(FoundFlags);
+ }
+
+ t.super_visit_with(self)
+ }
+
#[inline]
fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Note: no `super_visit_with` call.
let flags = t.flags();
if flags.intersects(self.flags) {
ControlFlow::Break(FoundFlags)
@@ -500,6 +517,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for HasTypeFlagsVisitor {
#[inline]
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Note: no `super_visit_with` call, as usual for `Region`.
let flags = r.type_flags();
if flags.intersects(self.flags) {
ControlFlow::Break(FoundFlags)
@@ -510,6 +528,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for HasTypeFlagsVisitor {
#[inline]
fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Note: no `super_visit_with` call.
let flags = FlagComputation::for_const(c);
trace!(r.flags=?flags);
if flags.intersects(self.flags) {
@@ -521,6 +540,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for HasTypeFlagsVisitor {
#[inline]
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Note: no `super_visit_with` call.
if predicate.flags().intersects(self.flags) {
ControlFlow::Break(FoundFlags)
} else {
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
index 97402caa0..62f41921d 100644
--- a/compiler/rustc_middle/src/ty/vtable.rs
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -84,7 +84,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
let scalar = match entry {
VtblEntry::MetadataDropInPlace => {
let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
- let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_alloc_id = tcx.reserve_and_set_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
Scalar::from_pointer(fn_ptr, &tcx)
}
@@ -94,7 +94,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
VtblEntry::Method(instance) => {
// Prepare the fn ptr we write into the vtable.
let instance = instance.polymorphize(tcx);
- let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_alloc_id = tcx.reserve_and_set_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
Scalar::from_pointer(fn_ptr, &tcx)
}
@@ -112,5 +112,5 @@ pub(super) fn vtable_allocation_provider<'tcx>(
}
vtable.mutability = Mutability::Not;
- tcx.create_memory_alloc(tcx.mk_const_alloc(vtable))
+ tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(vtable))
}
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index 7c3d9ed39..a86ff64bd 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -190,14 +190,11 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>)
ty::Adt(_, args)
| ty::Closure(_, args)
| ty::Generator(_, args, _)
- | ty::GeneratorWitnessMIR(_, args)
+ | ty::GeneratorWitness(_, args)
| ty::FnDef(_, args) => {
stack.extend(args.iter().rev());
}
ty::Tuple(ts) => stack.extend(ts.iter().rev().map(GenericArg::from)),
- ty::GeneratorWitness(ts) => {
- stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into()));
- }
ty::FnPtr(sig) => {
stack.push(sig.skip_binder().output().into());
stack.extend(sig.skip_binder().inputs().iter().copied().rev().map(|ty| ty.into()));
diff --git a/compiler/rustc_middle/src/util/find_self_call.rs b/compiler/rustc_middle/src/util/find_self_call.rs
index 1b845334c..9f1e4ac11 100644
--- a/compiler/rustc_middle/src/util/find_self_call.rs
+++ b/compiler/rustc_middle/src/util/find_self_call.rs
@@ -17,8 +17,8 @@ pub fn find_self_call<'tcx>(
&body[block].terminator
{
debug!("find_self_call: func={:?}", func);
- if let Operand::Constant(box Constant { literal, .. }) = func {
- if let ty::FnDef(def_id, fn_args) = *literal.ty().kind() {
+ if let Operand::Constant(box ConstOperand { const_, .. }) = func {
+ if let ty::FnDef(def_id, fn_args) = *const_.ty().kind() {
if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
tcx.opt_associated_item(def_id)
{
diff --git a/compiler/rustc_middle/src/util/mod.rs b/compiler/rustc_middle/src/util/mod.rs
index 53b425789..8c9598847 100644
--- a/compiler/rustc_middle/src/util/mod.rs
+++ b/compiler/rustc_middle/src/util/mod.rs
@@ -5,3 +5,27 @@ pub mod find_self_call;
pub use call_kind::{call_kind, CallDesugaringKind, CallKind};
pub use find_self_call::find_self_call;
+
+#[derive(Default, Copy, Clone)]
+pub struct Providers {
+ pub queries: rustc_middle::query::Providers,
+ pub extern_queries: rustc_middle::query::ExternProviders,
+ pub hooks: rustc_middle::hooks::Providers,
+}
+
+/// Backwards compatibility hack to keep the diff small. This
+/// gives direct access to the `queries` field's fields, which
+/// are what almost everything wants access to.
+impl std::ops::DerefMut for Providers {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.queries
+ }
+}
+
+impl std::ops::Deref for Providers {
+ type Target = rustc_middle::query::Providers;
+
+ fn deref(&self) -> &Self::Target {
+ &self.queries
+ }
+}
diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs
index 384a36843..578d8e7a9 100644
--- a/compiler/rustc_middle/src/values.rs
+++ b/compiler/rustc_middle/src/values.rs
@@ -1,4 +1,5 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::dep_kinds;
+use crate::query::plumbing::CyclePlaceholder;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan};
use rustc_hir as hir;
@@ -8,20 +9,26 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_query_system::query::QueryInfo;
use rustc_query_system::Value;
use rustc_span::def_id::LocalDefId;
-use rustc_span::Span;
+use rustc_span::{ErrorGuaranteed, Span};
use std::fmt::Write;
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Ty<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo<DepKind>]) -> Self {
+impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
// SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
- unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_misc_error(tcx)) }
+ unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(Ty::new_error(tcx, guar)) }
}
}
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo<DepKind>]) -> Self {
+impl<'tcx> Value<TyCtxt<'tcx>> for Result<ty::EarlyBinder<Ty<'_>>, CyclePlaceholder> {
+ fn from_cycle_error(_tcx: TyCtxt<'tcx>, _: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
+ Err(CyclePlaceholder(guar))
+ }
+}
+
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
// SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe {
@@ -32,12 +39,12 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::SymbolName<'_> {
}
}
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, stack: &[QueryInfo<DepKind>]) -> Self {
- let err = Ty::new_misc_error(tcx);
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, stack: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
+ let err = Ty::new_error(tcx, guar);
let arity = if let Some(frame) = stack.get(0)
- && frame.query.dep_kind == DepKind::fn_sig
+ && frame.query.dep_kind == dep_kinds::fn_sig
&& let Some(def_id) = frame.query.def_id
&& let Some(node) = tcx.hir().get_if_local(def_id)
&& let Some(sig) = node.fn_sig()
@@ -62,12 +69,12 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::Binder<'_, ty::FnSig<'_>> {
}
}
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self {
+impl<'tcx> Value<TyCtxt<'tcx>> for Representability {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
let mut item_and_field_ids = Vec::new();
let mut representable_ids = FxHashSet::default();
for info in cycle {
- if info.query.dep_kind == DepKind::representability
+ if info.query.dep_kind == dep_kinds::representability
&& let Some(field_id) = info.query.def_id
&& let Some(field_id) = field_id.as_local()
&& let Some(DefKind::Field) = info.query.def_kind
@@ -81,7 +88,7 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability {
}
}
for info in cycle {
- if info.query.dep_kind == DepKind::representability_adt_ty
+ if info.query.dep_kind == dep_kinds::representability_adt_ty
&& let Some(def_id) = info.query.ty_adt_id
&& let Some(def_id) = def_id.as_local()
&& !item_and_field_ids.iter().any(|&(id, _)| id == def_id)
@@ -94,23 +101,24 @@ impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for Representability {
}
}
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<Ty<'_>> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self {
- ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle))
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::EarlyBinder<Ty<'_>> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
+ ty::EarlyBinder::bind(Ty::from_cycle_error(tcx, cycle, guar))
}
}
-impl<'tcx> Value<TyCtxt<'tcx>, DepKind> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo<DepKind>]) -> Self {
- ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle))
+impl<'tcx> Value<TyCtxt<'tcx>> for ty::EarlyBinder<ty::Binder<'_, ty::FnSig<'_>>> {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self {
+ ty::EarlyBinder::bind(ty::Binder::from_cycle_error(tcx, cycle, guar))
}
}
-impl<'tcx, T> Value<TyCtxt<'tcx>, DepKind> for Result<T, &'_ ty::layout::LayoutError<'_>> {
- fn from_cycle_error(_tcx: TyCtxt<'tcx>, _cycle: &[QueryInfo<DepKind>]) -> Self {
+impl<'tcx, T> Value<TyCtxt<'tcx>> for Result<T, &'_ ty::layout::LayoutError<'_>> {
+ fn from_cycle_error(_tcx: TyCtxt<'tcx>, _cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> Self {
// tcx.arena.alloc cannot be used because we are not allowed to use &'tcx LayoutError under
// min_specialization. Since this is an error path anyways, leaking doesn't matter (and really,
// tcx.arena.alloc is pretty much equal to leaking).
+ // FIXME: `Cycle` should carry the ErrorGuaranteed
Err(Box::leak(Box::new(ty::layout::LayoutError::Cycle)))
}
}
@@ -209,7 +217,7 @@ fn find_item_ty_spans(
match ty.kind {
hir::TyKind::Path(hir::QPath::Resolved(_, path)) => {
if let Res::Def(kind, def_id) = path.res
- && !matches!(kind, DefKind::TyAlias { .. }) {
+ && !matches!(kind, DefKind::TyAlias) {
let check_params = def_id.as_local().map_or(true, |def_id| {
if def_id == needle {
spans.push(ty.span);
diff --git a/compiler/rustc_mir_build/messages.ftl b/compiler/rustc_mir_build/messages.ftl
index 938f3edd3..ce021923f 100644
--- a/compiler/rustc_mir_build/messages.ftl
+++ b/compiler/rustc_mir_build/messages.ftl
@@ -229,6 +229,9 @@ mir_build_non_exhaustive_patterns_type_not_empty = non-exhaustive patterns: type
.suggestion = ensure that all possible cases are being handled by adding a match arm with a wildcard pattern as shown
.help = ensure that all possible cases are being handled by adding a match arm with a wildcard pattern
+mir_build_non_partial_eq_match =
+ to use a constant of type `{$non_peq_ty}` in a pattern, the type must implement `PartialEq`
+
mir_build_nontrivial_structural_match =
to use a constant of type `{$non_sm_ty}` in a pattern, the constant's initializer must be trivial or `{$non_sm_ty}` must be annotated with `#[derive(PartialEq, Eq)]`
diff --git a/compiler/rustc_mir_build/src/build/cfg.rs b/compiler/rustc_mir_build/src/build/cfg.rs
index 4f1623b4c..fddcf9de7 100644
--- a/compiler/rustc_mir_build/src/build/cfg.rs
+++ b/compiler/rustc_mir_build/src/build/cfg.rs
@@ -49,7 +49,7 @@ impl<'tcx> CFG<'tcx> {
block: BasicBlock,
source_info: SourceInfo,
temp: Place<'tcx>,
- constant: Constant<'tcx>,
+ constant: ConstOperand<'tcx>,
) {
self.push_assign(
block,
@@ -70,10 +70,10 @@ impl<'tcx> CFG<'tcx> {
block,
source_info,
place,
- Rvalue::Use(Operand::Constant(Box::new(Constant {
+ Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: source_info.span,
user_ty: None,
- literal: ConstantKind::zero_sized(tcx.types.unit),
+ const_: Const::zero_sized(tcx.types.unit),
}))),
);
}
diff --git a/compiler/rustc_mir_build/src/build/custom/parse.rs b/compiler/rustc_mir_build/src/build/custom/parse.rs
index 60c4a0416..e2ab2cb90 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse.rs
@@ -1,5 +1,6 @@
use rustc_index::IndexSlice;
-use rustc_middle::{mir::*, thir::*, ty::Ty};
+use rustc_middle::ty::{self, Ty};
+use rustc_middle::{mir::*, thir::*};
use rustc_span::Span;
use super::{PResult, ParseCtxt, ParseError};
@@ -159,6 +160,14 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
);
self.parse_local_decls(local_decls.iter().copied())?;
+ let (debuginfo, rest) = parse_by_kind!(self, rest, _, "body with debuginfo",
+ ExprKind::Block { block } => {
+ let block = &self.thir[*block];
+ (&block.stmts, block.expr.unwrap())
+ },
+ );
+ self.parse_debuginfo(debuginfo.iter().copied())?;
+
let block_defs = parse_by_kind!(self, rest, _, "body with block defs",
ExprKind::Block { block } => &self.thir[*block].stmts,
);
@@ -195,6 +204,53 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
Ok(())
}
+ fn parse_debuginfo(&mut self, stmts: impl Iterator<Item = StmtId>) -> PResult<()> {
+ for stmt in stmts {
+ let stmt = &self.thir[stmt];
+ let expr = match stmt.kind {
+ StmtKind::Let { span, .. } => {
+ return Err(ParseError {
+ span,
+ item_description: format!("{:?}", stmt),
+ expected: "debuginfo".to_string(),
+ });
+ }
+ StmtKind::Expr { expr, .. } => expr,
+ };
+ let span = self.thir[expr].span;
+ let (name, operand) = parse_by_kind!(self, expr, _, "debuginfo",
+ @call("mir_debuginfo", args) => {
+ (args[0], args[1])
+ },
+ );
+ let name = parse_by_kind!(self, name, _, "debuginfo",
+ ExprKind::Literal { lit, neg: false } => lit,
+ );
+ let Some(name) = name.node.str() else {
+ return Err(ParseError {
+ span,
+ item_description: format!("{:?}", name),
+ expected: "string".to_string(),
+ });
+ };
+ let operand = self.parse_operand(operand)?;
+ let value = match operand {
+ Operand::Constant(c) => VarDebugInfoContents::Const(*c),
+ Operand::Copy(p) | Operand::Move(p) => VarDebugInfoContents::Place(p),
+ };
+ let dbginfo = VarDebugInfo {
+ name,
+ source_info: SourceInfo { span, scope: self.source_scope },
+ composite: None,
+ argument_index: None,
+ value,
+ };
+ self.body.var_debug_info.push(dbginfo);
+ }
+
+ Ok(())
+ }
+
fn parse_let_statement(&mut self, stmt_id: StmtId) -> PResult<(LocalVarId, Ty<'tcx>, Span)> {
let pattern = match &self.thir[stmt_id].kind {
StmtKind::Let { pattern, .. } => pattern,
diff --git a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
index 26662f5de..fd2c57a0a 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
@@ -1,4 +1,4 @@
-use rustc_middle::mir::interpret::{ConstValue, Scalar};
+use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::cast::mir_cast_kind;
use rustc_middle::{mir::*, thir::*, ty};
@@ -100,7 +100,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
expected: "constant pattern".to_string(),
});
};
- values.push(value.eval_bits(self.tcx, self.param_env, arm.pattern.ty));
+ values.push(value.eval_bits(self.tcx, self.param_env));
targets.push(self.parse_block(arm.body)?);
}
@@ -204,7 +204,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
)
}
- fn parse_operand(&self, expr_id: ExprId) -> PResult<Operand<'tcx>> {
+ pub fn parse_operand(&self, expr_id: ExprId) -> PResult<Operand<'tcx>> {
parse_by_kind!(self, expr_id, expr, "operand",
@call("mir_move", args) => self.parse_place(args[0]).map(Operand::Move),
@call("mir_static", args) => self.parse_static(args[0]),
@@ -283,12 +283,12 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
ExprKind::StaticRef { alloc_id, ty, .. } => {
let const_val =
ConstValue::Scalar(Scalar::from_pointer((*alloc_id).into(), &self.tcx));
- let literal = ConstantKind::Val(const_val, *ty);
+ let const_ = Const::Val(const_val, *ty);
- Ok(Operand::Constant(Box::new(Constant {
+ Ok(Operand::Constant(Box::new(ConstOperand {
span: expr.span,
user_ty: None,
- literal
+ const_
})))
},
)
@@ -301,7 +301,7 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
| ExprKind::NonHirLiteral { .. }
| ExprKind::ConstBlock { .. } => Ok({
let value = as_constant_inner(expr, |_| None, self.tcx);
- value.literal.eval_bits(self.tcx, self.param_env, value.ty())
+ value.const_.eval_bits(self.tcx, self.param_env)
}),
)
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
index aaa37446e..4ed49e787 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_constant.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -3,9 +3,7 @@
use crate::build::{parse_float_into_constval, Builder};
use rustc_ast as ast;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::{
- Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
-};
+use rustc_middle::mir::interpret::{Allocation, LitToConstError, LitToConstInput, Scalar};
use rustc_middle::mir::*;
use rustc_middle::thir::*;
use rustc_middle::ty::{
@@ -17,7 +15,7 @@ use rustc_target::abi::Size;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, yielding a compile-time constant. Assumes that
/// `expr` is a valid compile-time constant!
- pub(crate) fn as_constant(&mut self, expr: &Expr<'tcx>) -> Constant<'tcx> {
+ pub(crate) fn as_constant(&mut self, expr: &Expr<'tcx>) -> ConstOperand<'tcx> {
let this = self;
let tcx = this.tcx;
let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
@@ -44,62 +42,62 @@ pub fn as_constant_inner<'tcx>(
expr: &Expr<'tcx>,
push_cuta: impl FnMut(&Box<CanonicalUserType<'tcx>>) -> Option<UserTypeAnnotationIndex>,
tcx: TyCtxt<'tcx>,
-) -> Constant<'tcx> {
+) -> ConstOperand<'tcx> {
let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
match *kind {
ExprKind::Literal { lit, neg } => {
- let literal =
- match lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }) {
- Ok(c) => c,
- Err(LitToConstError::Reported(guar)) => {
- ConstantKind::Ty(ty::Const::new_error(tcx, guar, ty))
- }
- Err(LitToConstError::TypeError) => {
- bug!("encountered type error in `lit_to_mir_constant`")
- }
- };
-
- Constant { span, user_ty: None, literal }
+ let const_ = match lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg })
+ {
+ Ok(c) => c,
+ Err(LitToConstError::Reported(guar)) => {
+ Const::Ty(ty::Const::new_error(tcx, guar, ty))
+ }
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in `lit_to_mir_constant`")
+ }
+ };
+
+ ConstOperand { span, user_ty: None, const_ }
}
ExprKind::NonHirLiteral { lit, ref user_ty } => {
let user_ty = user_ty.as_ref().and_then(push_cuta);
- let literal = ConstantKind::Val(ConstValue::Scalar(Scalar::Int(lit)), ty);
+ let const_ = Const::Val(ConstValue::Scalar(Scalar::Int(lit)), ty);
- Constant { span, user_ty, literal }
+ ConstOperand { span, user_ty, const_ }
}
ExprKind::ZstLiteral { ref user_ty } => {
let user_ty = user_ty.as_ref().and_then(push_cuta);
- let literal = ConstantKind::Val(ConstValue::ZeroSized, ty);
+ let const_ = Const::Val(ConstValue::ZeroSized, ty);
- Constant { span, user_ty, literal }
+ ConstOperand { span, user_ty, const_ }
}
ExprKind::NamedConst { def_id, args, ref user_ty } => {
let user_ty = user_ty.as_ref().and_then(push_cuta);
let uneval = mir::UnevaluatedConst::new(def_id, args);
- let literal = ConstantKind::Unevaluated(uneval, ty);
+ let const_ = Const::Unevaluated(uneval, ty);
- Constant { user_ty, span, literal }
+ ConstOperand { user_ty, span, const_ }
}
ExprKind::ConstParam { param, def_id: _ } => {
let const_param = ty::Const::new_param(tcx, param, expr.ty);
- let literal = ConstantKind::Ty(const_param);
+ let const_ = Const::Ty(const_param);
- Constant { user_ty: None, span, literal }
+ ConstOperand { user_ty: None, span, const_ }
}
ExprKind::ConstBlock { did: def_id, args } => {
let uneval = mir::UnevaluatedConst::new(def_id, args);
- let literal = ConstantKind::Unevaluated(uneval, ty);
+ let const_ = Const::Unevaluated(uneval, ty);
- Constant { user_ty: None, span, literal }
+ ConstOperand { user_ty: None, span, const_ }
}
ExprKind::StaticRef { alloc_id, ty, .. } => {
let const_val = ConstValue::Scalar(Scalar::from_pointer(alloc_id.into(), &tcx));
- let literal = ConstantKind::Val(const_val, ty);
+ let const_ = Const::Val(const_val, ty);
- Constant { span, user_ty: None, literal }
+ ConstOperand { span, user_ty: None, const_ }
}
_ => span_bug!(span, "expression is not a valid constant {:?}", kind),
}
@@ -109,7 +107,7 @@ pub fn as_constant_inner<'tcx>(
fn lit_to_mir_constant<'tcx>(
tcx: TyCtxt<'tcx>,
lit_input: LitToConstInput<'tcx>,
-) -> Result<ConstantKind<'tcx>, LitToConstError> {
+) -> Result<Const<'tcx>, LitToConstError> {
let LitToConstInput { lit, ty, neg } = lit_input;
let trunc = |n| {
let param_ty = ty::ParamEnv::reveal_all().and(ty);
@@ -133,14 +131,14 @@ fn lit_to_mir_constant<'tcx>(
let s = s.as_str();
let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes());
let allocation = tcx.mk_const_alloc(allocation);
- ConstValue::Slice { data: allocation, start: 0, end: s.len() }
+ ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::ByteStr(data, _), ty::Ref(_, inner_ty, _))
if matches!(inner_ty.kind(), ty::Slice(_)) =>
{
let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
let allocation = tcx.mk_const_alloc(allocation);
- ConstValue::Slice { data: allocation, start: 0, end: data.len() }
+ ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::ByteStr(data, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
let id = tcx.allocate_bytes(data);
@@ -150,7 +148,7 @@ fn lit_to_mir_constant<'tcx>(
{
let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
let allocation = tcx.mk_const_alloc(allocation);
- ConstValue::Slice { data: allocation, start: 0, end: data.len() }
+ ConstValue::Slice { data: allocation, meta: allocation.inner().size().bytes() }
}
(ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))
@@ -175,5 +173,5 @@ fn lit_to_mir_constant<'tcx>(
_ => return Err(LitToConstError::TypeError),
};
- Ok(ConstantKind::Val(value, ty))
+ Ok(Const::Val(value, ty))
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 2e7ef265a..5bccba4fd 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -102,7 +102,7 @@ fn convert_to_hir_projections_and_truncate_for_capture(
continue;
}
// These do not affect anything, they just make sure we know the right type.
- ProjectionElem::OpaqueCast(_) => continue,
+ ProjectionElem::OpaqueCast(_) | ProjectionElem::Subtype(..) => continue,
ProjectionElem::Index(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. } => {
@@ -690,7 +690,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
fake_borrow_temp.into(),
Rvalue::Ref(
tcx.lifetimes.re_erased,
- BorrowKind::Shallow,
+ BorrowKind::Fake,
Place { local: base_place.local, projection },
),
);
@@ -709,6 +709,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
ProjectionElem::Field(..)
| ProjectionElem::Downcast(..)
| ProjectionElem::OpaqueCast(..)
+ | ProjectionElem::Subtype(..)
| ProjectionElem::ConstantIndex { .. }
| ProjectionElem::Subslice { .. } => (),
}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index 3220a184d..d4089eef4 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -249,7 +249,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let mut comparer = |range: u128, bin_op: BinOp| -> Place<'tcx> {
let range_val =
- ConstantKind::from_bits(this.tcx, range, ty::ParamEnv::empty().and(unsigned_ty));
+ Const::from_bits(this.tcx, range, ty::ParamEnv::empty().and(unsigned_ty));
let lit_op = this.literal_operand(expr.span, range_val);
let is_bin_op = this.temp(bool_ty, expr_span);
this.cfg.push_assign(
@@ -485,10 +485,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
block = unpack!(this.stmt_expr(block, expr, None));
- block.and(Rvalue::Use(Operand::Constant(Box::new(Constant {
+ block.and(Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: expr_span,
user_ty: None,
- literal: ConstantKind::zero_sized(this.tcx.types.unit),
+ const_: Const::zero_sized(this.tcx.types.unit),
}))))
}
@@ -817,7 +817,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
let param_ty = ty::ParamEnv::empty().and(ty);
let size = self.tcx.layout_of(param_ty).unwrap().size;
- let literal = ConstantKind::from_bits(self.tcx, size.unsigned_int_max(), param_ty);
+ let literal = Const::from_bits(self.tcx, size.unsigned_int_max(), param_ty);
self.literal_operand(span, literal)
}
@@ -828,7 +828,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let param_ty = ty::ParamEnv::empty().and(ty);
let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
let n = 1 << (bits - 1);
- let literal = ConstantKind::from_bits(self.tcx, n, param_ty);
+ let literal = Const::from_bits(self.tcx, n, param_ty);
self.literal_operand(span, literal)
}
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index a5c86e31a..a4de42d45 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -114,10 +114,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
true_block,
source_info,
destination,
- Constant {
+ ConstOperand {
span: expr_span,
user_ty: None,
- literal: ConstantKind::from_bool(this.tcx, true),
+ const_: Const::from_bool(this.tcx, true),
},
);
@@ -125,10 +125,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
false_block,
source_info,
destination,
- Constant {
+ ConstOperand {
span: expr_span,
user_ty: None,
- literal: ConstantKind::from_bool(this.tcx, false),
+ const_: Const::from_bool(this.tcx, false),
},
);
@@ -159,52 +159,44 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
}
ExprKind::LogicalOp { op, lhs, rhs } => {
- // And:
- //
- // [block: If(lhs)] -true-> [else_block: dest = (rhs)]
- // | (false)
- // [shortcircuit_block: dest = false]
- //
- // Or:
- //
- // [block: If(lhs)] -false-> [else_block: dest = (rhs)]
- // | (true)
- // [shortcircuit_block: dest = true]
-
- let (shortcircuit_block, mut else_block, join_block) = (
- this.cfg.start_new_block(),
- this.cfg.start_new_block(),
- this.cfg.start_new_block(),
- );
-
- let lhs = unpack!(block = this.as_local_operand(block, &this.thir[lhs]));
- let blocks = match op {
- LogicalOp::And => (else_block, shortcircuit_block),
- LogicalOp::Or => (shortcircuit_block, else_block),
+ let condition_scope = this.local_scope();
+ let source_info = this.source_info(expr.span);
+ // We first evaluate the left-hand side of the predicate ...
+ let (then_block, else_block) =
+ this.in_if_then_scope(condition_scope, expr.span, |this| {
+ this.then_else_break(
+ block,
+ &this.thir[lhs],
+ Some(condition_scope),
+ condition_scope,
+ source_info,
+ )
+ });
+ let (short_circuit, continuation, constant) = match op {
+ LogicalOp::And => (else_block, then_block, false),
+ LogicalOp::Or => (then_block, else_block, true),
};
- let term = TerminatorKind::if_(lhs, blocks.0, blocks.1);
- this.cfg.terminate(block, source_info, term);
-
+ // At this point, the control flow splits into a short-circuiting path
+ // and a continuation path.
+ // - If the operator is `&&`, passing `lhs` leads to continuation of evaluation on `rhs`;
+ // failing it leads to the short-circuiting path which assigns `false` to the place.
+ // - If the operator is `||`, failing `lhs` leads to continuation of evaluation on `rhs`;
+ // passing it leads to the short-circuiting path which assigns `true` to the place.
this.cfg.push_assign_constant(
- shortcircuit_block,
+ short_circuit,
source_info,
destination,
- Constant {
- span: expr_span,
+ ConstOperand {
+ span: expr.span,
user_ty: None,
- literal: match op {
- LogicalOp::And => ConstantKind::from_bool(this.tcx, false),
- LogicalOp::Or => ConstantKind::from_bool(this.tcx, true),
- },
+ const_: Const::from_bool(this.tcx, constant),
},
);
- this.cfg.goto(shortcircuit_block, source_info, join_block);
-
- let rhs = unpack!(else_block = this.as_local_operand(else_block, &this.thir[rhs]));
- this.cfg.push_assign(else_block, source_info, destination, Rvalue::Use(rhs));
- this.cfg.goto(else_block, source_info, join_block);
-
- join_block.unit()
+ let rhs = unpack!(this.expr_into_dest(destination, continuation, &this.thir[rhs]));
+ let target = this.cfg.start_new_block();
+ this.cfg.goto(rhs, source_info, target);
+ this.cfg.goto(short_circuit, source_info, target);
+ target.unit()
}
ExprKind::Loop { body } => {
// [block]
@@ -441,12 +433,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
}
thir::InlineAsmOperand::Const { value, span } => {
mir::InlineAsmOperand::Const {
- value: Box::new(Constant { span, user_ty: None, literal: value }),
+ value: Box::new(ConstOperand {
+ span,
+ user_ty: None,
+ const_: value,
+ }),
}
}
thir::InlineAsmOperand::SymFn { value, span } => {
mir::InlineAsmOperand::SymFn {
- value: Box::new(Constant { span, user_ty: None, literal: value }),
+ value: Box::new(ConstOperand {
+ span,
+ user_ty: None,
+ const_: value,
+ }),
}
}
thir::InlineAsmOperand::SymStatic { def_id } => {
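For illustration only, not part of the patch: the rewritten `ExprKind::LogicalOp` arm above lowers `&&`/`||` through `then_else_break` and `expr_into_dest`, instead of evaluating the left-hand side into a local operand and branching with `TerminatorKind::if_`. A minimal sketch of source code that exercises the new path, with the resulting CFG shape noted in comments (block names are illustrative):

fn and_of(a: bool, b: bool) -> bool {
    // Conceptual CFG produced by the new lowering for `a && b`:
    //   bb_entry:  branch on `a`   -> bb_rhs (true) / bb_short (false)
    //   bb_rhs:    _0 = b           (rhs is evaluated directly into the destination)
    //   bb_short:  _0 = const false (short-circuit value)
    //   bb_rhs and bb_short both: goto -> bb_join
    a && b
}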
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 3c4507407..6baf8c7d7 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -64,6 +64,43 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
rhs_then_block.unit()
}
+ ExprKind::LogicalOp { op: LogicalOp::Or, lhs, rhs } => {
+ let local_scope = this.local_scope();
+ let (lhs_success_block, failure_block) =
+ this.in_if_then_scope(local_scope, expr_span, |this| {
+ this.then_else_break(
+ block,
+ &this.thir[lhs],
+ temp_scope_override,
+ local_scope,
+ variable_source_info,
+ )
+ });
+ let rhs_success_block = unpack!(this.then_else_break(
+ failure_block,
+ &this.thir[rhs],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ));
+ this.cfg.goto(lhs_success_block, variable_source_info, rhs_success_block);
+ rhs_success_block.unit()
+ }
+ ExprKind::Unary { op: UnOp::Not, arg } => {
+ let local_scope = this.local_scope();
+ let (success_block, failure_block) =
+ this.in_if_then_scope(local_scope, expr_span, |this| {
+ this.then_else_break(
+ block,
+ &this.thir[arg],
+ temp_scope_override,
+ local_scope,
+ variable_source_info,
+ )
+ });
+ this.break_for_else(success_block, break_scope, variable_source_info);
+ failure_block.unit()
+ }
ExprKind::Scope { region_scope, lint_level, value } => {
let region_scope = (region_scope, this.source_info(expr_span));
this.in_scope(region_scope, lint_level, |this| {
@@ -76,6 +113,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
)
})
}
+ ExprKind::Use { source } => this.then_else_break(
+ block,
+ &this.thir[source],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ),
ExprKind::Let { expr, ref pat } => this.lower_let_expr(
block,
&this.thir[expr],
@@ -961,13 +1005,13 @@ enum TestKind<'tcx> {
///
/// For `bool` we always generate two edges, one for `true` and one for
/// `false`.
- options: FxIndexMap<ConstantKind<'tcx>, u128>,
+ options: FxIndexMap<Const<'tcx>, u128>,
},
/// Test for equality with value, possibly after an unsizing coercion to
/// `ty`,
Eq {
- value: ConstantKind<'tcx>,
+ value: Const<'tcx>,
// Integer types are handled by `SwitchInt`, and constants with ADT
// types are converted back into patterns, so this can only be `&str`,
// `&[T]`, `f32` or `f64`.
@@ -1578,9 +1622,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// may want to add cases based on the candidates that are
// available
match test.kind {
- TestKind::SwitchInt { switch_ty, ref mut options } => {
+ TestKind::SwitchInt { switch_ty: _, ref mut options } => {
for candidate in candidates.iter() {
- if !self.add_cases_to_switch(&match_place, candidate, switch_ty, options) {
+ if !self.add_cases_to_switch(&match_place, candidate, options) {
break;
}
}
@@ -1960,7 +2004,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let re_erased = tcx.lifetimes.re_erased;
let scrutinee_source_info = self.source_info(scrutinee_span);
for &(place, temp) in fake_borrows {
- let borrow = Rvalue::Ref(re_erased, BorrowKind::Shallow, place);
+ let borrow = Rvalue::Ref(re_erased, BorrowKind::Fake, place);
self.cfg.push_assign(block, scrutinee_source_info, Place::from(temp), borrow);
}
@@ -2243,6 +2287,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
name,
source_info: debug_source_info,
value: VarDebugInfoContents::Place(for_arm_body.into()),
+ composite: None,
argument_index: None,
});
let locals = if has_guard.0 {
@@ -2262,6 +2307,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
name,
source_info: debug_source_info,
value: VarDebugInfoContents::Place(ref_for_guard.into()),
+ composite: None,
argument_index: None,
});
LocalsForNode::ForGuard { ref_for_guard, for_arm_body }
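For illustration only, not part of the patch: `then_else_break` above gains dedicated arms for `LogicalOp::Or`, `UnOp::Not`, and `ExprKind::Use`, so `||` and `!` in a condition are lowered straight into the surrounding if-then scope's success and failure blocks instead of falling through to the generic path that first evaluates the whole expression into a boolean temporary. A minimal sketch of a condition that exercises the new arms:

fn check(a: bool, b: Option<i32>) -> i32 {
    // `!a` takes the new UnOp::Not arm (its success/failure blocks are swapped);
    // `||` takes the new LogicalOp::Or arm (rhs is evaluated only if lhs fails).
    if !a || matches!(b, Some(0)) { 1 } else { 0 }
}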
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index 484e84909..795d1db8e 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -85,8 +85,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
&mut self,
test_place: &PlaceBuilder<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
- switch_ty: Ty<'tcx>,
- options: &mut FxIndexMap<ConstantKind<'tcx>, u128>,
+ options: &mut FxIndexMap<Const<'tcx>, u128>,
) -> bool {
let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place)
else {
@@ -95,9 +94,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
match match_pair.pattern.kind {
PatKind::Constant { value } => {
- options
- .entry(value)
- .or_insert_with(|| value.eval_bits(self.tcx, self.param_env, switch_ty));
+ options.entry(value).or_insert_with(|| value.eval_bits(self.tcx, self.param_env));
true
}
PatKind::Variant { .. } => {
@@ -255,10 +252,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block,
source_info,
TerminatorKind::Call {
- func: Operand::Constant(Box::new(Constant {
+ func: Operand::Constant(Box::new(ConstOperand {
span: test.span,
user_ty: None,
- literal: method,
+ const_: method,
})),
args: vec![Operand::Move(ref_string)],
destination: ref_str,
@@ -388,7 +385,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block: BasicBlock,
make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
source_info: SourceInfo,
- value: ConstantKind<'tcx>,
+ value: Const<'tcx>,
mut val: Place<'tcx>,
mut ty: Ty<'tcx>,
) {
@@ -485,7 +482,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block,
source_info,
TerminatorKind::Call {
- func: Operand::Constant(Box::new(Constant {
+ func: Operand::Constant(Box::new(ConstOperand {
span: source_info.span,
// FIXME(#54571): This constant comes from user input (a
@@ -494,7 +491,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Need to experiment.
user_ty: None,
- literal: method,
+ const_: method,
})),
args: vec![Operand::Copy(val), expect],
destination: eq_result,
@@ -800,11 +797,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
span_bug!(match_pair.pattern.span, "simplifiable pattern found: {:?}", match_pair.pattern)
}
- fn const_range_contains(
- &self,
- range: &PatRange<'tcx>,
- value: ConstantKind<'tcx>,
- ) -> Option<bool> {
+ fn const_range_contains(&self, range: &PatRange<'tcx>, value: Const<'tcx>) -> Option<bool> {
use std::cmp::Ordering::*;
// For performance, it's important to only do the second
@@ -821,7 +814,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
fn values_not_contained_in_range(
&self,
range: &PatRange<'tcx>,
- options: &FxIndexMap<ConstantKind<'tcx>, u128>,
+ options: &FxIndexMap<Const<'tcx>, u128>,
) -> Option<bool> {
for &val in options.keys() {
if self.const_range_contains(range, val)? {
@@ -866,7 +859,7 @@ fn trait_method<'tcx>(
trait_def_id: DefId,
method_name: Symbol,
args: impl IntoIterator<Item: Into<GenericArg<'tcx>>>,
-) -> ConstantKind<'tcx> {
+) -> Const<'tcx> {
// The unhygienic comparison here is acceptable because this is only
// used on known traits.
let item = tcx
@@ -877,5 +870,5 @@ fn trait_method<'tcx>(
let method_ty = Ty::new_fn_def(tcx, item.def_id, args);
- ConstantKind::zero_sized(method_ty)
+ Const::zero_sized(method_ty)
}
diff --git a/compiler/rustc_mir_build/src/build/misc.rs b/compiler/rustc_mir_build/src/build/misc.rs
index 90d78658f..c96e99ef0 100644
--- a/compiler/rustc_mir_build/src/build/misc.rs
+++ b/compiler/rustc_mir_build/src/build/misc.rs
@@ -25,19 +25,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Convenience function for creating a literal operand, one
/// without any user type annotation.
- pub(crate) fn literal_operand(
- &mut self,
- span: Span,
- literal: ConstantKind<'tcx>,
- ) -> Operand<'tcx> {
- let constant = Box::new(Constant { span, user_ty: None, literal });
+ pub(crate) fn literal_operand(&mut self, span: Span, const_: Const<'tcx>) -> Operand<'tcx> {
+ let constant = Box::new(ConstOperand { span, user_ty: None, const_ });
Operand::Constant(constant)
}
/// Returns a zero literal operand for the appropriate type, works for
/// bool, char and integers.
pub(crate) fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
- let literal = ConstantKind::from_bits(self.tcx, 0, ty::ParamEnv::empty().and(ty));
+ let literal = Const::from_bits(self.tcx, 0, ty::ParamEnv::empty().and(ty));
self.literal_operand(span, literal)
}
@@ -54,10 +50,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
block,
source_info,
temp,
- Constant {
+ ConstOperand {
span: source_info.span,
user_ty: None,
- literal: ConstantKind::from_usize(self.tcx, value),
+ const_: Const::from_usize(self.tcx, value),
},
);
temp
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index 2a23a69b5..bba470564 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -15,7 +15,6 @@ use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::middle::region;
-use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::*;
use rustc_middle::thir::{
@@ -56,7 +55,8 @@ pub(crate) fn closure_saved_names_of_captured_variables<'tcx>(
/// Construct the MIR for a given `DefId`.
fn mir_build(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
// Ensure unsafeck and abstract const building are run before we steal the THIR.
- tcx.ensure_with_value().thir_check_unsafety(def);
+ tcx.ensure_with_value()
+ .thir_check_unsafety(tcx.typeck_root_def_id(def.to_def_id()).expect_local());
tcx.ensure_with_value().thir_abstract_const(def);
if let Err(e) = tcx.check_match(def) {
return construct_error(tcx, def, e);
@@ -633,7 +633,7 @@ fn construct_error(tcx: TyCtxt<'_>, def: LocalDefId, err: ErrorGuaranteed) -> Bo
_ => bug!("expected closure or generator, found {ty:?}"),
}
}
- hir::BodyOwnerKind::Const => 0,
+ hir::BodyOwnerKind::Const { .. } => 0,
hir::BodyOwnerKind::Static(_) => 0,
};
let mut cfg = CFG { basic_blocks: IndexVec::new() };
@@ -700,7 +700,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// Constants always need overflow checks.
check_overflow |= matches!(
tcx.hir().body_owner_kind(def),
- hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_)
+ hir::BodyOwnerKind::Const { .. } | hir::BodyOwnerKind::Static(_)
);
let lint_level = LintLevel::Explicit(hir_id);
@@ -822,6 +822,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
name,
source_info: SourceInfo::outermost(captured_place.var_ident.span),
value: VarDebugInfoContents::Place(use_place),
+ composite: None,
argument_index: None,
});
@@ -851,6 +852,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
name,
source_info,
value: VarDebugInfoContents::Place(arg_local.into()),
+ composite: None,
argument_index: Some(argument_index as u16 + 1),
});
}
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index a96288a11..4cf6a349a 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -370,7 +370,7 @@ impl DropTree {
let terminator = TerminatorKind::Drop {
target: blocks[drop_data.1].unwrap(),
// The caller will handle this if needed.
- unwind: UnwindAction::Terminate,
+ unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
place: drop_data.0.local.into(),
replace: false,
};
@@ -685,9 +685,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
drops.add_entry(block, drop_idx);
// `build_drop_trees` doesn't have access to our source_info, so we
- // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // create a dummy terminator now. `TerminatorKind::UnwindResume` is used
// because MIR type checking will panic if it hasn't been overwritten.
- self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+ self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume);
self.cfg.start_new_block().unit()
}
@@ -717,9 +717,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
drops.add_entry(block, drop_idx);
// `build_drop_trees` doesn't have access to our source_info, so we
- // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // create a dummy terminator now. `TerminatorKind::UnwindResume` is used
// because MIR type checking will panic if it hasn't been overwritten.
- self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+ self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume);
}
// Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
@@ -1441,7 +1441,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
blocks[ROOT_NODE] = *resume_block;
drops.build_mir::<Unwind>(cfg, &mut blocks);
if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
- cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::Resume);
+ cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::UnwindResume);
*resume_block = blocks[ROOT_NODE];
}
@@ -1506,8 +1506,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
}
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Yield { .. }
diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs
index 192bd4a83..7b888dcbc 100644
--- a/compiler/rustc_mir_build/src/check_unsafety.rs
+++ b/compiler/rustc_mir_build/src/check_unsafety.rs
@@ -259,7 +259,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
);
};
match borrow_kind {
- BorrowKind::Shallow | BorrowKind::Shared => {
+ BorrowKind::Fake | BorrowKind::Shared => {
if !ty.is_freeze(self.tcx, self.param_env) {
self.requires_unsafe(pat.span, BorrowOfLayoutConstrainedField);
}
@@ -446,7 +446,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
visit::walk_expr(&mut visitor, expr);
if visitor.found {
match borrow_kind {
- BorrowKind::Shallow | BorrowKind::Shared
+ BorrowKind::Fake | BorrowKind::Shared
if !self.thir[arg].ty.is_freeze(self.tcx, self.param_env) =>
{
self.requires_unsafe(expr.span, BorrowOfLayoutConstrainedField)
@@ -454,7 +454,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
BorrowKind::Mut { .. } => {
self.requires_unsafe(expr.span, MutationOfLayoutConstrainedField)
}
- BorrowKind::Shallow | BorrowKind::Shared => {}
+ BorrowKind::Fake | BorrowKind::Shared => {}
}
}
}
diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs
index 3ff3387a7..bee5ac550 100644
--- a/compiler/rustc_mir_build/src/errors.rs
+++ b/compiler/rustc_mir_build/src/errors.rs
@@ -749,6 +749,12 @@ pub struct NontrivialStructuralMatch<'tcx> {
}
#[derive(LintDiagnostic)]
+#[diag(mir_build_non_partial_eq_match)]
+pub struct NonPartialEqMatch<'tcx> {
+ pub non_peq_ty: Ty<'tcx>,
+}
+
+#[derive(LintDiagnostic)]
#[diag(mir_build_overlapping_range_endpoints)]
#[note]
pub struct OverlappingRangeEndpoints<'tcx> {
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
index 7fb73b5c7..94be38bee 100644
--- a/compiler/rustc_mir_build/src/lints.rs
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -186,9 +186,9 @@ impl<'mir, 'tcx, C: TerminatorClassifier<'tcx>> TriColorVisitor<BasicBlocks<'tcx
match self.body[bb].terminator().kind {
// These terminators return control flow to the caller.
- TerminatorKind::Terminate
+ TerminatorKind::UnwindTerminate(_)
| TerminatorKind::GeneratorDrop
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Yield { .. } => ControlFlow::Break(NonRecursive),
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
index 6c1f7d7a6..16a85d427 100644
--- a/compiler/rustc_mir_build/src/thir/cx/expr.rs
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -647,21 +647,15 @@ impl<'tcx> Cx<'tcx> {
out_expr: out_expr.map(|expr| self.mirror_expr(expr)),
},
hir::InlineAsmOperand::Const { ref anon_const } => {
- let value = mir::ConstantKind::from_anon_const(
- tcx,
- anon_const.def_id,
- self.param_env,
- );
+ let value =
+ mir::Const::from_anon_const(tcx, anon_const.def_id, self.param_env);
let span = tcx.def_span(anon_const.def_id);
InlineAsmOperand::Const { value, span }
}
hir::InlineAsmOperand::SymFn { ref anon_const } => {
- let value = mir::ConstantKind::from_anon_const(
- tcx,
- anon_const.def_id,
- self.param_env,
- );
+ let value =
+ mir::Const::from_anon_const(tcx, anon_const.def_id, self.param_env);
let span = tcx.def_span(anon_const.def_id);
InlineAsmOperand::SymFn { value, span }
@@ -950,7 +944,7 @@ impl<'tcx> Cx<'tcx> {
let kind = if self.tcx.is_thread_local_static(id) {
ExprKind::ThreadLocalRef(id)
} else {
- let alloc_id = self.tcx.create_static_alloc(id);
+ let alloc_id = self.tcx.reserve_and_set_static_alloc(id);
ExprKind::StaticRef { alloc_id, ty, def_id: id }
};
ExprKind::Deref {
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
index 383e80851..d440ca319 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -7,6 +7,7 @@ use crate::errors::*;
use rustc_arena::TypedArena;
use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_errors::{
struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
@@ -660,6 +661,17 @@ fn report_arm_reachability<'p, 'tcx>(
}
}
+fn collect_non_exhaustive_tys<'p, 'tcx>(
+ pat: &DeconstructedPat<'p, 'tcx>,
+ non_exhaustive_tys: &mut FxHashSet<Ty<'tcx>>,
+) {
+ if matches!(pat.ctor(), Constructor::NonExhaustive) {
+ non_exhaustive_tys.insert(pat.ty());
+ }
+ pat.iter_fields()
+ .for_each(|field_pat| collect_non_exhaustive_tys(field_pat, non_exhaustive_tys))
+}
+
/// Report that a match is not exhaustive.
fn non_exhaustive_match<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
@@ -708,31 +720,33 @@ fn non_exhaustive_match<'p, 'tcx>(
};
};
- let is_variant_list_non_exhaustive = matches!(scrut_ty.kind(),
- ty::Adt(def, _) if def.is_variant_list_non_exhaustive() && !def.did().is_local());
-
adt_defined_here(cx, &mut err, scrut_ty, &witnesses);
- err.note(format!(
- "the matched value is of type `{}`{}",
- scrut_ty,
- if is_variant_list_non_exhaustive { ", which is marked as non-exhaustive" } else { "" }
- ));
- if (scrut_ty == cx.tcx.types.usize || scrut_ty == cx.tcx.types.isize)
- && !is_empty_match
- && witnesses.len() == 1
- && matches!(witnesses[0].ctor(), Constructor::NonExhaustive)
- {
- err.note(format!(
- "`{scrut_ty}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
- exhaustively",
- ));
- if cx.tcx.sess.is_nightly_build() {
- err.help(format!(
- "add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
- enable precise `{scrut_ty}` matching",
- ));
+ err.note(format!("the matched value is of type `{}`", scrut_ty));
+
+ if !is_empty_match && witnesses.len() == 1 {
+ let mut non_exhaustive_tys = FxHashSet::default();
+ collect_non_exhaustive_tys(&witnesses[0], &mut non_exhaustive_tys);
+
+ for ty in non_exhaustive_tys {
+ if ty.is_ptr_sized_integral() {
+ err.note(format!(
+ "`{ty}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
+ exhaustively",
+ ));
+ if cx.tcx.sess.is_nightly_build() {
+ err.help(format!(
+ "add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
+ enable precise `{ty}` matching",
+ ));
+ }
+ } else if ty == cx.tcx.types.str_ {
+ err.note("`&str` cannot be matched exhaustively, so a wildcard `_` is necessary");
+ } else if cx.is_foreign_non_exhaustive_enum(ty) {
+ err.note(format!("`{ty}` is marked as non-exhaustive, so a wildcard `_` is necessary to match exhaustively"));
+ }
}
}
+
if let ty::Ref(_, sub_ty, _) = scrut_ty.kind() {
if !sub_ty.is_inhabited_from(cx.tcx, cx.module, cx.param_env) {
err.note("references are always considered inhabited");
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index 1376344cf..ae4424660 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -16,18 +16,20 @@ use std::cell::Cell;
use super::PatCtxt;
use crate::errors::{
- FloatPattern, IndirectStructuralMatch, InvalidPattern, NontrivialStructuralMatch,
- PointerPattern, TypeNotStructural, UnionPattern, UnsizedPattern,
+ FloatPattern, IndirectStructuralMatch, InvalidPattern, NonPartialEqMatch,
+ NontrivialStructuralMatch, PointerPattern, TypeNotStructural, UnionPattern, UnsizedPattern,
};
impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
/// Converts an evaluated constant to a pattern (if possible).
/// This means aggregate values (like structs and enums) are converted
/// to a pattern that matches the value (as if you'd compared via structural equality).
+ ///
+ /// `cv` must be a valtree or a `mir::ConstValue`.
#[instrument(level = "debug", skip(self), ret)]
pub(super) fn const_to_pat(
&self,
- cv: mir::ConstantKind<'tcx>,
+ cv: mir::Const<'tcx>,
id: hir::HirId,
span: Span,
check_body_for_struct_match_violation: Option<DefId>,
@@ -64,12 +66,10 @@ struct ConstToPat<'tcx> {
}
/// This error type signals that we encountered a non-struct-eq situation.
-/// We bubble this up in order to get back to the reference destructuring and make that emit
-/// a const pattern instead of a deref pattern. This allows us to simply call `PartialEq::eq`
-/// on such patterns (since that function takes a reference) and not have to jump through any
-/// hoops to get a reference to the value.
+/// We will fall back to calling `PartialEq::eq` on such patterns,
+/// and exhaustiveness checking will consider them as matching nothing.
#[derive(Debug)]
-struct FallbackToConstRef;
+struct FallbackToOpaqueConst;
impl<'tcx> ConstToPat<'tcx> {
fn new(
@@ -104,7 +104,7 @@ impl<'tcx> ConstToPat<'tcx> {
fn to_pat(
&mut self,
- cv: mir::ConstantKind<'tcx>,
+ cv: mir::Const<'tcx>,
check_body_for_struct_match_violation: Option<DefId>,
) -> Box<Pat<'tcx>> {
trace!(self.treat_byte_string_as_slice);
@@ -124,7 +124,7 @@ impl<'tcx> ConstToPat<'tcx> {
debug!(?check_body_for_struct_match_violation, ?mir_structural_match_violation);
let inlined_const_as_pat = match cv {
- mir::ConstantKind::Ty(c) => match c.kind() {
+ mir::Const::Ty(c) => match c.kind() {
ty::ConstKind::Param(_)
| ty::ConstKind::Infer(_)
| ty::ConstKind::Bound(_, _)
@@ -136,7 +136,7 @@ impl<'tcx> ConstToPat<'tcx> {
}
ty::ConstKind::Value(valtree) => self
.recur(valtree, cv.ty(), mir_structural_match_violation.unwrap_or(false))
- .unwrap_or_else(|_| {
+ .unwrap_or_else(|_: FallbackToOpaqueConst| {
Box::new(Pat {
span: self.span,
ty: cv.ty(),
@@ -144,10 +144,10 @@ impl<'tcx> ConstToPat<'tcx> {
})
}),
},
- mir::ConstantKind::Unevaluated(_, _) => {
+ mir::Const::Unevaluated(_, _) => {
span_bug!(self.span, "unevaluated const in `to_pat`: {cv:?}")
}
- mir::ConstantKind::Val(_, _) => Box::new(Pat {
+ mir::Const::Val(_, _) => Box::new(Pat {
span: self.span,
ty: cv.ty(),
kind: PatKind::Constant { value: cv },
@@ -155,8 +155,9 @@ impl<'tcx> ConstToPat<'tcx> {
};
if !self.saw_const_match_error.get() {
- // If we were able to successfully convert the const to some pat,
- // double-check that all types in the const implement `Structural`.
+ // If we were able to successfully convert the const to some pat (possibly with some
+ // lints, but no errors), double-check that all types in the const implement
+ // `Structural` and `PartialEq`.
let structural =
traits::search_for_structural_match_violation(self.span, self.tcx(), cv.ty());
@@ -178,7 +179,7 @@ impl<'tcx> ConstToPat<'tcx> {
}
if let Some(non_sm_ty) = structural {
- if !self.type_may_have_partial_eq_impl(cv.ty()) {
+ if !self.type_has_partial_eq_impl(cv.ty()) {
if let ty::Adt(def, ..) = non_sm_ty.kind() {
if def.is_union() {
let err = UnionPattern { span: self.span };
@@ -192,8 +193,10 @@ impl<'tcx> ConstToPat<'tcx> {
} else {
let err = InvalidPattern { span: self.span, non_sm_ty };
self.tcx().sess.emit_err(err);
- return Box::new(Pat { span: self.span, ty: cv.ty(), kind: PatKind::Wild });
}
+ // All branches above emitted an error. Don't print any more lints.
+ // The pattern we return is irrelevant since we errored.
+ return Box::new(Pat { span: self.span, ty: cv.ty(), kind: PatKind::Wild });
} else if !self.saw_const_match_lint.get() {
if let Some(mir_structural_match_violation) = mir_structural_match_violation {
match non_sm_ty.kind() {
@@ -238,13 +241,24 @@ impl<'tcx> ConstToPat<'tcx> {
_ => {}
}
}
+
+ // Always check for `PartialEq`, even if we emitted other lints. (But not if there were
+ // any errors.) This ensures it shows up in cargo's future-compat reports as well.
+ if !self.type_has_partial_eq_impl(cv.ty()) {
+ self.tcx().emit_spanned_lint(
+ lint::builtin::CONST_PATTERNS_WITHOUT_PARTIAL_EQ,
+ self.id,
+ self.span,
+ NonPartialEqMatch { non_peq_ty: cv.ty() },
+ );
+ }
}
inlined_const_as_pat
}
#[instrument(level = "trace", skip(self), ret)]
- fn type_may_have_partial_eq_impl(&self, ty: Ty<'tcx>) -> bool {
+ fn type_has_partial_eq_impl(&self, ty: Ty<'tcx>) -> bool {
// double-check there even *is* a semantic `PartialEq` to dispatch to.
//
// (If there isn't, then we can safely issue a hard
@@ -259,14 +273,19 @@ impl<'tcx> ConstToPat<'tcx> {
ty::TraitRef::new(self.tcx(), partial_eq_trait_id, [ty, ty]),
);
- // FIXME: should this call a `predicate_must_hold` variant instead?
- self.infcx.predicate_may_hold(&partial_eq_obligation)
+ // This *could* accept a type that isn't actually `PartialEq`, because region bounds get
+ // ignored. However that should be pretty much impossible since consts that do not depend on
+ // generics can only mention the `'static` lifetime, and how would one have a type that's
+ // `PartialEq` for some lifetime but *not* for `'static`? If this ever becomes a problem
+ // we'll need to leave some sort of trace of this requirement in the MIR so that borrowck
+ // can ensure that the type really implements `PartialEq`.
+ self.infcx.predicate_must_hold_modulo_regions(&partial_eq_obligation)
}
fn field_pats(
&self,
vals: impl Iterator<Item = (ValTree<'tcx>, Ty<'tcx>)>,
- ) -> Result<Vec<FieldPat<'tcx>>, FallbackToConstRef> {
+ ) -> Result<Vec<FieldPat<'tcx>>, FallbackToOpaqueConst> {
vals.enumerate()
.map(|(idx, (val, ty))| {
let field = FieldIdx::new(idx);
@@ -284,7 +303,7 @@ impl<'tcx> ConstToPat<'tcx> {
cv: ValTree<'tcx>,
ty: Ty<'tcx>,
mir_structural_match_violation: bool,
- ) -> Result<Box<Pat<'tcx>>, FallbackToConstRef> {
+ ) -> Result<Box<Pat<'tcx>>, FallbackToOpaqueConst> {
let id = self.id;
let span = self.span;
let tcx = self.tcx();
@@ -299,7 +318,7 @@ impl<'tcx> ConstToPat<'tcx> {
span,
FloatPattern,
);
- return Err(FallbackToConstRef);
+ return Err(FallbackToOpaqueConst);
}
// If the type is not structurally comparable, just emit the constant directly,
// causing the pattern match code to treat it opaquely.
@@ -323,11 +342,12 @@ impl<'tcx> ConstToPat<'tcx> {
// Since we are behind a reference, we can just bubble the error up so we get a
// constant at reference type, making it easy to let the fallback call
// `PartialEq::eq` on it.
- return Err(FallbackToConstRef);
+ return Err(FallbackToOpaqueConst);
}
ty::FnDef(..) => {
self.saw_const_match_error.set(true);
tcx.sess.emit_err(InvalidPattern { span, non_sm_ty: ty });
+ // We errored, so the pattern we generate is irrelevant.
PatKind::Wild
}
ty::Adt(adt_def, _) if !self.type_marked_structural(ty) => {
@@ -335,6 +355,7 @@ impl<'tcx> ConstToPat<'tcx> {
self.saw_const_match_error.set(true);
let err = TypeNotStructural { span, non_sm_ty: ty };
tcx.sess.emit_err(err);
+ // We errored, so the pattern we generate is irrelevant.
PatKind::Wild
}
ty::Adt(adt_def, args) if adt_def.is_enum() => {
@@ -385,9 +406,9 @@ impl<'tcx> ConstToPat<'tcx> {
ty::Ref(_, pointee_ty, ..) => match *pointee_ty.kind() {
// `&str` is represented as a valtree, let's keep using this
// optimization for now.
- ty::Str => PatKind::Constant {
- value: mir::ConstantKind::Ty(ty::Const::new_value(tcx, cv, ty)),
- },
+ ty::Str => {
+ PatKind::Constant { value: mir::Const::Ty(ty::Const::new_value(tcx, cv, ty)) }
+ }
// Backwards compatibility hack: support references to non-structural types,
// but hard error if we aren't behind a double reference. We could just use
// the fallback code path below, but that would allow *more* of this fishy
@@ -404,13 +425,15 @@ impl<'tcx> ConstToPat<'tcx> {
IndirectStructuralMatch { non_sm_ty: *pointee_ty },
);
}
- return Err(FallbackToConstRef);
+ return Err(FallbackToOpaqueConst);
} else {
if !self.saw_const_match_error.get() {
self.saw_const_match_error.set(true);
let err = TypeNotStructural { span, non_sm_ty: *pointee_ty };
tcx.sess.emit_err(err);
}
+ tcx.sess.delay_span_bug(span, "`saw_const_match_error` set but no error?");
+ // We errored, so the pattern we generate is irrelevant.
PatKind::Wild
}
}
@@ -423,6 +446,7 @@ impl<'tcx> ConstToPat<'tcx> {
tcx.sess.emit_err(err);
// FIXME: introduce PatKind::Error to silence follow up diagnostics due to unreachable patterns.
+ // We errored, so the pattern we generate is irrelevant.
PatKind::Wild
} else {
let old = self.behind_reference.replace(true);
@@ -445,14 +469,15 @@ impl<'tcx> ConstToPat<'tcx> {
}
}
},
- ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) => PatKind::Constant {
- value: mir::ConstantKind::Ty(ty::Const::new_value(tcx, cv, ty)),
- },
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) => {
+ PatKind::Constant { value: mir::Const::Ty(ty::Const::new_value(tcx, cv, ty)) }
+ }
ty::FnPtr(..) | ty::RawPtr(..) => unreachable!(),
_ => {
self.saw_const_match_error.set(true);
let err = InvalidPattern { span, non_sm_ty: ty };
tcx.sess.emit_err(err);
+ // We errored, so the pattern we generate is irrelevant.
PatKind::Wild
}
};
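For illustration only, not part of the patch: `FallbackToConstRef` is renamed to `FallbackToOpaqueConst` above, and the doc comment now states that such constants are compared via `PartialEq::eq` and treated by exhaustiveness checking as matching nothing; in addition, `type_has_partial_eq_impl` now uses `predicate_must_hold_modulo_regions`, and a missing `PartialEq` impl is reported through the new `NonPartialEqMatch` lint. A minimal sketch of a pattern that takes the opaque-fallback path (the `FloatPattern` case in the hunk above):

// A named float constant used as a pattern is not structurally matched:
// const_to_pat emits the FloatPattern lint and returns FallbackToOpaqueConst,
// so the comparison is performed via PartialEq::eq and a wildcard arm is
// still required for exhaustiveness.
const ROUGHLY_PI: f32 = 3.14;

fn is_roughly_pi(x: f32) -> bool {
    match x {
        ROUGHLY_PI => true,
        _ => false,
    }
}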
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
index bee1c4e46..b79beb1c5 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -137,16 +137,16 @@ impl IntRange {
fn from_constant<'tcx>(
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
- value: mir::ConstantKind<'tcx>,
+ value: mir::Const<'tcx>,
) -> Option<IntRange> {
let ty = value.ty();
let (target_size, bias) = Self::integral_size_and_signed_bias(tcx, ty)?;
let val = match value {
- mir::ConstantKind::Ty(c) if let ty::ConstKind::Value(valtree) = c.kind() => {
+ mir::Const::Ty(c) if let ty::ConstKind::Value(valtree) = c.kind() => {
valtree.unwrap_leaf().to_bits(target_size).ok()
},
// This is a more general form of the previous case.
- _ => value.try_eval_bits(tcx, param_env, ty),
+ _ => value.try_eval_bits(tcx, param_env),
}?;
let val = val ^ bias;
@@ -225,8 +225,8 @@ impl IntRange {
let (lo, hi) = (lo ^ bias, hi ^ bias);
let env = ty::ParamEnv::empty().and(ty);
- let lo_const = mir::ConstantKind::from_bits(tcx, lo, env);
- let hi_const = mir::ConstantKind::from_bits(tcx, hi, env);
+ let lo_const = mir::Const::from_bits(tcx, lo, env);
+ let hi_const = mir::Const::from_bits(tcx, hi, env);
let kind = if lo == hi {
PatKind::Constant { value: lo_const }
@@ -619,9 +619,9 @@ pub(super) enum Constructor<'tcx> {
/// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
IntRange(IntRange),
/// Ranges of floating-point literal values (`2.0..=5.2`).
- FloatRange(mir::ConstantKind<'tcx>, mir::ConstantKind<'tcx>, RangeEnd),
+ FloatRange(mir::Const<'tcx>, mir::Const<'tcx>, RangeEnd),
/// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
- Str(mir::ConstantKind<'tcx>),
+ Str(mir::Const<'tcx>),
/// Array and slice patterns.
Slice(Slice),
/// Constants that must not be matched structurally. They are treated as black
@@ -1379,8 +1379,8 @@ impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
let ty = lo.ty();
ctor = if let Some(int_range) = IntRange::from_range(
cx.tcx,
- lo.eval_bits(cx.tcx, cx.param_env, lo.ty()),
- hi.eval_bits(cx.tcx, cx.param_env, hi.ty()),
+ lo.eval_bits(cx.tcx, cx.param_env),
+ hi.eval_bits(cx.tcx, cx.param_env),
ty,
&end,
) {
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
index c08fe54c3..fe47a1cd7 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -18,9 +18,9 @@ use rustc_hir::pat_util::EnumerateAndAdjustIterator;
use rustc_hir::RangeEnd;
use rustc_index::Idx;
use rustc_middle::mir::interpret::{
- ConstValue, ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar,
+ ErrorHandled, GlobalId, LitToConstError, LitToConstInput, Scalar,
};
-use rustc_middle::mir::{self, ConstantKind, UserTypeProjection};
+use rustc_middle::mir::{self, Const, UserTypeProjection};
use rustc_middle::mir::{BorrowKind, Mutability};
use rustc_middle::thir::{Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange};
use rustc_middle::ty::CanonicalUserTypeAnnotation;
@@ -100,8 +100,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
fn lower_pattern_range(
&mut self,
ty: Ty<'tcx>,
- lo: mir::ConstantKind<'tcx>,
- hi: mir::ConstantKind<'tcx>,
+ lo: mir::Const<'tcx>,
+ hi: mir::Const<'tcx>,
end: RangeEnd,
span: Span,
lo_expr: Option<&hir::Expr<'tcx>>,
@@ -131,7 +131,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = lo_expr
&& let rustc_ast::ast::LitKind::Int(val, _) = lit.node
{
- if lo.eval_bits(self.tcx, self.param_env, ty) != val {
+ if lo.eval_bits(self.tcx, self.param_env) != val {
lower_overflow = true;
self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
}
@@ -139,7 +139,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = hi_expr
&& let rustc_ast::ast::LitKind::Int(val, _) = lit.node
{
- if hi.eval_bits(self.tcx, self.param_env, ty) != val {
+ if hi.eval_bits(self.tcx, self.param_env) != val {
higher_overflow = true;
self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
}
@@ -162,7 +162,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = lo_expr
&& let rustc_ast::ast::LitKind::Int(val, _) = lit.node
{
- if lo.eval_bits(self.tcx, self.param_env, ty) != val {
+ if lo.eval_bits(self.tcx, self.param_env) != val {
lower_overflow = true;
self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
}
@@ -170,7 +170,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
if let Some(hir::Expr { kind: hir::ExprKind::Lit(lit), .. }) = hi_expr
&& let rustc_ast::ast::LitKind::Int(val, _) = lit.node
{
- if hi.eval_bits(self.tcx, self.param_env, ty) != val {
+ if hi.eval_bits(self.tcx, self.param_env) != val {
higher_overflow = true;
self.tcx.sess.emit_err(LiteralOutOfRange { span: lit.span, ty, max: max() });
}
@@ -191,18 +191,18 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
ty: Ty<'tcx>,
lo: Option<&PatKind<'tcx>>,
hi: Option<&PatKind<'tcx>>,
- ) -> Option<(mir::ConstantKind<'tcx>, mir::ConstantKind<'tcx>)> {
+ ) -> Option<(mir::Const<'tcx>, mir::Const<'tcx>)> {
match (lo, hi) {
(Some(PatKind::Constant { value: lo }), Some(PatKind::Constant { value: hi })) => {
Some((*lo, *hi))
}
(Some(PatKind::Constant { value: lo }), None) => {
let hi = ty.numeric_max_val(self.tcx)?;
- Some((*lo, mir::ConstantKind::from_const(hi, self.tcx)))
+ Some((*lo, mir::Const::from_ty_const(hi, self.tcx)))
}
(None, Some(PatKind::Constant { value: hi })) => {
let lo = ty.numeric_min_val(self.tcx)?;
- Some((mir::ConstantKind::from_const(lo, self.tcx), *hi))
+ Some((mir::Const::from_ty_const(lo, self.tcx), *hi))
}
_ => None,
}
@@ -439,7 +439,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
DefKind::Struct
| DefKind::Ctor(CtorOf::Struct, ..)
| DefKind::Union
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::AssocTy,
_,
)
@@ -525,8 +525,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
.tcx
.const_eval_global_id_for_typeck(param_env_reveal_all, cid, Some(span))
.map(|val| match val {
- Some(valtree) => mir::ConstantKind::Ty(ty::Const::new_value(self.tcx, valtree, ty)),
- None => mir::ConstantKind::Val(
+ Some(valtree) => mir::Const::Ty(ty::Const::new_value(self.tcx, valtree, ty)),
+ None => mir::Const::Val(
self.tcx
.const_eval_global_id(param_env_reveal_all, cid, Some(span))
.expect("const_eval_global_id_for_typeck should have already failed"),
@@ -555,8 +555,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
subpattern: pattern,
ascription: Ascription {
annotation,
- /// Note that use `Contravariant` here. See the
- /// `variance` field documentation for details.
+ // Note that we use `Contravariant` here. See the
+ // `variance` field documentation for details.
variance: ty::Variance::Contravariant,
},
},
@@ -566,7 +566,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
pattern
}
}
- Err(ErrorHandled::TooGeneric) => {
+ Err(ErrorHandled::TooGeneric(_)) => {
// While `Reported | Linted` cases will have diagnostics emitted already,
// this is not true for the `TooGeneric` case, so we need to give the user more information.
self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
@@ -608,7 +608,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
};
if let Some(lit_input) = lit_input {
match tcx.at(expr.span).lit_to_const(lit_input) {
- Ok(c) => return self.const_to_pat(ConstantKind::Ty(c), id, span, None).kind,
+ Ok(c) => return self.const_to_pat(Const::Ty(c), id, span, None).kind,
// If an error occurred, ignore that it's a literal
// and leave reporting the error up to const eval of
// the unevaluated constant below.
@@ -626,11 +626,13 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
let ct = ty::UnevaluatedConst { def: def_id.to_def_id(), args: args };
// First try using a valtree in order to destructure the constant into a pattern.
+ // FIXME: replace "try to do a thing, then fall back to another thing"
+ // with something more principled, like a trait query checking whether this can be turned into a valtree.
if let Ok(Some(valtree)) =
self.tcx.const_eval_resolve_for_typeck(self.param_env, ct, Some(span))
{
self.const_to_pat(
- ConstantKind::Ty(ty::Const::new_value(self.tcx, valtree, ty)),
+ Const::Ty(ty::Const::new_value(self.tcx, valtree, ty)),
id,
span,
None,
@@ -638,14 +640,14 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
.kind
} else {
// If that fails, convert it to an opaque constant pattern.
- match tcx.const_eval_resolve(self.param_env, uneval, None) {
- Ok(val) => self.const_to_pat(mir::ConstantKind::Val(val, ty), id, span, None).kind,
- Err(ErrorHandled::TooGeneric) => {
+ match tcx.const_eval_resolve(self.param_env, uneval, Some(span)) {
+ Ok(val) => self.const_to_pat(mir::Const::Val(val, ty), id, span, None).kind,
+ Err(ErrorHandled::TooGeneric(_)) => {
// If we land here it means the const can't be evaluated because it's `TooGeneric`.
self.tcx.sess.emit_err(ConstPatternDependsOnGenericParameter { span });
PatKind::Wild
}
- Err(ErrorHandled::Reported(_)) => PatKind::Wild,
+ Err(ErrorHandled::Reported(..)) => PatKind::Wild,
}
}
}
@@ -676,7 +678,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
LitToConstInput { lit: &lit.node, ty: self.typeck_results.expr_ty(expr), neg };
match self.tcx.at(expr.span).lit_to_const(lit_input) {
Ok(constant) => {
- self.const_to_pat(ConstantKind::Ty(constant), expr.hir_id, lit.span, None).kind
+ self.const_to_pat(Const::Ty(constant), expr.hir_id, lit.span, None).kind
}
Err(LitToConstError::Reported(_)) => PatKind::Wild,
Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"),
@@ -836,8 +838,8 @@ impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
#[instrument(skip(tcx), level = "debug")]
pub(crate) fn compare_const_vals<'tcx>(
tcx: TyCtxt<'tcx>,
- a: mir::ConstantKind<'tcx>,
- b: mir::ConstantKind<'tcx>,
+ a: mir::Const<'tcx>,
+ b: mir::Const<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Option<Ordering> {
assert_eq!(a.ty(), b.ty());
@@ -853,18 +855,18 @@ pub(crate) fn compare_const_vals<'tcx>(
ty::Float(_) | ty::Int(_) => {} // require special handling, see below
_ => match (a, b) {
(
- mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(a)), _a_ty),
- mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(b)), _b_ty),
+ mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(a)), _a_ty),
+ mir::Const::Val(mir::ConstValue::Scalar(Scalar::Int(b)), _b_ty),
) => return Some(a.cmp(&b)),
- (mir::ConstantKind::Ty(a), mir::ConstantKind::Ty(b)) => {
+ (mir::Const::Ty(a), mir::Const::Ty(b)) => {
return Some(a.kind().cmp(&b.kind()));
}
_ => {}
},
}
- let a = a.eval_bits(tcx, param_env, ty);
- let b = b.eval_bits(tcx, param_env, ty);
+ let a = a.eval_bits(tcx, param_env);
+ let b = b.eval_bits(tcx, param_env);
use rustc_apfloat::Float;
match *ty.kind() {
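
As a reference for the `eval_bits` fallback above: once both constants are reduced to raw bits, signed integers have to be compared after sign extension and floats by IEEE semantics, which is why the surrounding function returns `Option<Ordering>`. The sketch below is a standalone, simplified analogue (fixed widths, no rustc types), not compiler code.

```rust
use std::cmp::Ordering;

// Sign-extend the low `bit_width` bits of `a`/`b` and compare as signed values.
fn compare_signed_bits(a: u128, b: u128, bit_width: u32) -> Ordering {
    let shift = 128 - bit_width;
    let a = ((a << shift) as i128) >> shift;
    let b = ((b << shift) as i128) >> shift;
    a.cmp(&b)
}

// Floats are compared by value, not by bit pattern; NaN makes the result `None`.
fn compare_float_bits(a: u64, b: u64) -> Option<Ordering> {
    f64::from_bits(a).partial_cmp(&f64::from_bits(b))
}

fn main() {
    // -1i8 (bits 0xFF) sorts below 1i8 (bits 0x01).
    println!("{:?}", compare_signed_bits(0xFF, 0x01, 8));
    // Comparing against NaN yields None.
    println!("{:?}", compare_float_bits(1.5f64.to_bits(), f64::NAN.to_bits()));
}
```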
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
index 08cfe98bb..21031e8ba 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -618,10 +618,15 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
let new_witnesses = if let Constructor::Missing { .. } = ctor {
// We got the special `Missing` constructor, so each of the missing constructors
// gives a new pattern that is not caught by the match. We list those patterns.
- let new_patterns = if pcx.is_non_exhaustive {
- // Here we don't want the user to try to list all variants, we want them to add
- // a wildcard, so we only suggest that.
- vec![DeconstructedPat::wildcard(pcx.ty, pcx.span)]
+ if pcx.is_non_exhaustive {
+ witnesses
+ .into_iter()
+ // Here we don't want the user to try to list all variants, we want them to add
+ // a wildcard, so we only suggest that.
+ .map(|witness| {
+ witness.apply_constructor(pcx, &Constructor::NonExhaustive)
+ })
+ .collect()
} else {
let mut split_wildcard = SplitWildcard::new(pcx);
split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
@@ -633,7 +638,7 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
// constructor, that matches everything that can be built with
// it. For example, if `ctor` is a `Constructor::Variant` for
// `Option::Some`, we get the pattern `Some(_)`.
- let mut new: Vec<DeconstructedPat<'_, '_>> = split_wildcard
+ let mut new_patterns: Vec<DeconstructedPat<'_, '_>> = split_wildcard
.iter_missing(pcx)
.filter_map(|missing_ctor| {
// Check if this variant is marked `doc(hidden)`
@@ -648,27 +653,25 @@ impl<'p, 'tcx> Usefulness<'p, 'tcx> {
.collect();
if hide_variant_show_wild {
- new.push(DeconstructedPat::wildcard(pcx.ty, pcx.span));
+ new_patterns.push(DeconstructedPat::wildcard(pcx.ty, pcx.span));
}
- new
- };
-
- witnesses
- .into_iter()
- .flat_map(|witness| {
- new_patterns.iter().map(move |pat| {
- Witness(
- witness
- .0
- .iter()
- .chain(once(pat))
- .map(DeconstructedPat::clone_and_forget_reachability)
- .collect(),
- )
+ witnesses
+ .into_iter()
+ .flat_map(|witness| {
+ new_patterns.iter().map(move |pat| {
+ Witness(
+ witness
+ .0
+ .iter()
+ .chain(once(pat))
+ .map(DeconstructedPat::clone_and_forget_reachability)
+ .collect(),
+ )
+ })
})
- })
- .collect()
+ .collect()
+ }
} else {
witnesses
.into_iter()
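
For orientation, the witness-extension step that this hunk moves inside the branch works like the toy below: every existing witness is extended with each suggested pattern via `flat_map`, producing one new witness per (witness, pattern) pair. `Witness` and `Pat` here are invented stand-ins, not the compiler's types.

```rust
#[derive(Clone, Debug)]
enum Pat {
    Wild,
    Variant(&'static str),
}

#[derive(Clone, Debug)]
struct Witness(Vec<Pat>);

// Extend each witness with every suggested pattern, cloning the pattern stack.
fn extend_witnesses(witnesses: Vec<Witness>, new_patterns: &[Pat]) -> Vec<Witness> {
    witnesses
        .into_iter()
        .flat_map(|w| {
            new_patterns.iter().map(move |p| {
                let mut stack = w.0.clone();
                stack.push(p.clone());
                Witness(stack)
            })
        })
        .collect()
}

fn main() {
    let witnesses = vec![Witness(vec![Pat::Wild])];
    let missing = [Pat::Variant("Some(_)"), Pat::Variant("None")];
    for w in extend_witnesses(witnesses, &missing) {
        println!("{w:?}");
    }
}
```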
diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
index 9e02b0271..c9991e499 100644
--- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
@@ -80,7 +80,7 @@ impl Unwind {
fn into_action(self) -> UnwindAction {
match self {
Unwind::To(bb) => UnwindAction::Cleanup(bb),
- Unwind::InCleanup => UnwindAction::Terminate,
+ Unwind::InCleanup => UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
}
}
@@ -194,6 +194,7 @@ where
D: DropElaborator<'b, 'tcx>,
'tcx: 'b,
{
+ #[instrument(level = "trace", skip(self), ret)]
fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
place.ty(self.elaborator.body(), self.tcx()).ty
}
@@ -220,11 +221,9 @@ where
//
// FIXME: I think we should just control the flags externally,
// and then we do not need this machinery.
+ #[instrument(level = "debug")]
pub fn elaborate_drop(&mut self, bb: BasicBlock) {
- debug!("elaborate_drop({:?}, {:?})", bb, self);
- let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
- debug!("elaborate_drop({:?}, {:?}): live - {:?}", bb, self, style);
- match style {
+ match self.elaborator.drop_style(self.path, DropFlagMode::Deep) {
DropStyle::Dead => {
self.elaborator
.patch()
@@ -973,10 +972,10 @@ where
}
fn constant_usize(&self, val: u16) -> Operand<'tcx> {
- Operand::Constant(Box::new(Constant {
+ Operand::Constant(Box::new(ConstOperand {
span: self.source_info.span,
user_ty: None,
- literal: ConstantKind::from_usize(self.tcx(), val.into()),
+ const_: Const::from_usize(self.tcx(), val.into()),
}))
}
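
The two hunks above trade hand-written `debug!` lines for `#[instrument]`, which records the arguments (and, with `ret`, the return value) automatically. Below is a minimal standalone sketch of that logging style, assuming the `tracing` and `tracing-subscriber` crates; the function and values are made up for illustration.

```rust
use tracing::{debug, instrument, Level};

// `ret` logs the return value at the span's level when the function exits.
#[instrument(level = "debug", ret)]
fn drop_style(path: usize, deep: bool) -> &'static str {
    if deep { "Open" } else { "Static" }
}

fn main() {
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    debug!("picked style {}", drop_style(3, true));
}
```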
diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs
index 8a9e37c5a..70451edd5 100644
--- a/compiler/rustc_mir_dataflow/src/framework/direction.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs
@@ -1,5 +1,5 @@
use rustc_middle::mir::{
- self, BasicBlock, CallReturnPlaces, Location, SwitchTargets, TerminatorEdges, UnwindAction,
+ self, BasicBlock, CallReturnPlaces, Location, SwitchTargets, TerminatorEdges,
};
use std::ops::RangeInclusive;
@@ -486,10 +486,10 @@ impl Direction for Forward {
propagate(target, exit_state);
propagate(unwind, exit_state);
}
- TerminatorEdges::AssignOnReturn { return_, unwind, place } => {
+ TerminatorEdges::AssignOnReturn { return_, cleanup, place } => {
// This must be done *first*, otherwise the unwind path will see the assignments.
- if let UnwindAction::Cleanup(unwind) = unwind {
- propagate(unwind, exit_state);
+ if let Some(cleanup) = cleanup {
+ propagate(cleanup, exit_state);
}
if let Some(return_) = return_ {
analysis.apply_call_return_effect(exit_state, bb, place);
diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
index 1421d9b45..bdddaaebc 100644
--- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
@@ -538,7 +538,7 @@ where
fn visit_block_start(
&mut self,
- _results: &Results<'tcx, A>,
+ _results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_block_data: &mir::BasicBlockData<'tcx>,
_block: BasicBlock,
@@ -550,7 +550,7 @@ where
fn visit_block_end(
&mut self,
- _results: &Results<'tcx, A>,
+ _results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_block_data: &mir::BasicBlockData<'tcx>,
_block: BasicBlock,
@@ -562,7 +562,7 @@ where
fn visit_statement_before_primary_effect(
&mut self,
- results: &Results<'tcx, A>,
+ results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_statement: &mir::Statement<'tcx>,
_location: Location,
@@ -575,7 +575,7 @@ where
fn visit_statement_after_primary_effect(
&mut self,
- results: &Results<'tcx, A>,
+ results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_statement: &mir::Statement<'tcx>,
_location: Location,
@@ -586,7 +586,7 @@ where
fn visit_terminator_before_primary_effect(
&mut self,
- results: &Results<'tcx, A>,
+ results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_terminator: &mir::Terminator<'tcx>,
_location: Location,
@@ -599,7 +599,7 @@ where
fn visit_terminator_after_primary_effect(
&mut self,
- results: &Results<'tcx, A>,
+ results: &mut Results<'tcx, A>,
state: &Self::FlowState,
_terminator: &mir::Terminator<'tcx>,
_location: Location,
diff --git a/compiler/rustc_mir_dataflow/src/framework/visitor.rs b/compiler/rustc_mir_dataflow/src/framework/visitor.rs
index 76a729827..3cfa7cc1c 100644
--- a/compiler/rustc_mir_dataflow/src/framework/visitor.rs
+++ b/compiler/rustc_mir_dataflow/src/framework/visitor.rs
@@ -35,7 +35,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
fn visit_block_start(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_block_data: &'mir mir::BasicBlockData<'tcx>,
_block: BasicBlock,
@@ -46,7 +46,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
/// its `statement_effect`.
fn visit_statement_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_statement: &'mir mir::Statement<'tcx>,
_location: Location,
@@ -57,7 +57,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
/// statement applied to `state`.
fn visit_statement_after_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_statement: &'mir mir::Statement<'tcx>,
_location: Location,
@@ -68,7 +68,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
/// its `terminator_effect`.
fn visit_terminator_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_terminator: &'mir mir::Terminator<'tcx>,
_location: Location,
@@ -81,7 +81,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
/// The `call_return_effect` (if one exists) will *not* be applied to `state`.
fn visit_terminator_after_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_terminator: &'mir mir::Terminator<'tcx>,
_location: Location,
@@ -90,7 +90,7 @@ pub trait ResultsVisitor<'mir, 'tcx, R> {
fn visit_block_end(
&mut self,
- _results: &R,
+ _results: &mut R,
_state: &Self::FlowState,
_block_data: &'mir mir::BasicBlockData<'tcx>,
_block: BasicBlock,
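
The signature change in this file (from `&R` to `&mut R`) hands visitors mutable access to the results object they are driven over. A tiny standalone illustration of why that matters is below; `Cursor` and `Printer` are invented stand-ins, and the trait is a cut-down sketch of the idea rather than the real dataflow API.

```rust
// A cut-down visitor trait: results are passed mutably.
trait ResultsVisitor<R> {
    fn visit_block_start(&mut self, _results: &mut R, _block: usize) {}
}

struct Cursor {
    pos: usize,
}

impl Cursor {
    fn seek(&mut self, pos: usize) {
        self.pos = pos;
    }
}

struct Printer;

impl ResultsVisitor<Cursor> for Printer {
    fn visit_block_start(&mut self, results: &mut Cursor, block: usize) {
        // Mutable access lets the visitor reposition a stateful results object.
        results.seek(block);
        println!("block {block}, cursor at {}", results.pos);
    }
}

fn main() {
    let mut cursor = Cursor { pos: 0 };
    let mut printer = Printer;
    for block in 0..3 {
        printer.visit_block_start(&mut cursor, block);
    }
}
```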
diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
index 8d7b50796..f6398c8d0 100644
--- a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
@@ -5,7 +5,8 @@ use rustc_middle::mir::*;
use crate::{AnalysisDomain, GenKill, GenKillAnalysis};
/// A dataflow analysis that tracks whether a pointer or reference could possibly exist that points
-/// to a given local.
+/// to a given local. This analysis ignores fake borrows, so it should not be used by
+/// borrowck.
///
/// At present, this is used as a very limited form of alias analysis. For example,
/// `MaybeBorrowedLocals` is used to compute which locals are live during a yield expression for
@@ -91,13 +92,17 @@ where
self.super_rvalue(rvalue, location);
match rvalue {
- Rvalue::AddressOf(_, borrowed_place) | Rvalue::Ref(_, _, borrowed_place) => {
+            // We ignore fake borrows as these get removed after analysis and shouldn't affect
+ // the layout of generators.
+ Rvalue::AddressOf(_, borrowed_place)
+ | Rvalue::Ref(_, BorrowKind::Mut { .. } | BorrowKind::Shared, borrowed_place) => {
if !borrowed_place.is_indirect() {
self.trans.gen(borrowed_place.local);
}
}
Rvalue::Cast(..)
+ | Rvalue::Ref(_, BorrowKind::Fake, _)
| Rvalue::ShallowInitBox(..)
| Rvalue::Use(..)
| Rvalue::ThreadLocalRef(..)
@@ -131,7 +136,7 @@ where
}
}
- TerminatorKind::Terminate
+ TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Assert { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::FalseEdge { .. }
@@ -139,7 +144,7 @@ where
| TerminatorKind::GeneratorDrop
| TerminatorKind::Goto { .. }
| TerminatorKind::InlineAsm { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Unreachable
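
To make the intent of the hunk concrete: only genuine shared or mutable borrows should mark a local as possibly borrowed, while fake borrows are erased after analysis and must not influence things like generator layout. Below is a toy gen-set transfer function under that assumption; all types are invented for the sketch.

```rust
use std::collections::BTreeSet;

#[derive(Clone, Copy)]
enum BorrowKind {
    Shared,
    Mut,
    Fake,
}

// "Gen" every local that gets a real borrow; ignore fake borrows entirely.
fn transfer(borrows: &[(usize, BorrowKind)], maybe_borrowed: &mut BTreeSet<usize>) {
    for &(local, kind) in borrows {
        match kind {
            BorrowKind::Shared | BorrowKind::Mut => {
                maybe_borrowed.insert(local);
            }
            BorrowKind::Fake => {}
        }
    }
}

fn main() {
    let mut state = BTreeSet::new();
    transfer(
        &[(1, BorrowKind::Mut), (2, BorrowKind::Fake), (3, BorrowKind::Shared)],
        &mut state,
    );
    println!("{state:?}"); // {1, 3}: the fake borrow of local 2 is ignored
}
```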
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
index 5aa73c7a9..664703795 100644
--- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
@@ -201,7 +201,7 @@ impl DefUse {
| NonMutatingUseContext::Inspect
| NonMutatingUseContext::Move
| NonMutatingUseContext::PlaceMention
- | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::FakeBorrow
| NonMutatingUseContext::SharedBorrow,
) => Some(DefUse::Use),
diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
index bea23b7f7..94d6eb67d 100644
--- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
+++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
@@ -291,14 +291,14 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
// Nothing to do for these. Match exhaustively so this fails to compile when new
// variants are added.
- TerminatorKind::Terminate
+ TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Assert { .. }
| TerminatorKind::Drop { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Unreachable => {}
@@ -328,14 +328,14 @@ impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'_, '_, 'tcx> {
// Nothing to do for these. Match exhaustively so this fails to compile when new
// variants are added.
TerminatorKind::Yield { .. }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Assert { .. }
| TerminatorKind::Drop { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Return
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Unreachable => {}
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
index 7806e8f45..2a7f23ef6 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
@@ -57,6 +57,7 @@ impl<'tcx> Lift for PlaceElem<'tcx> {
ProjectionElem::ConstantIndex { offset, min_length, from_end }
}
ProjectionElem::Downcast(a, u) => ProjectionElem::Downcast(a, u),
+ ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty.lift()),
}
}
}
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
index 5052de991..7a5b3585d 100644
--- a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
+++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
@@ -115,44 +115,126 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
let body = self.builder.body;
let tcx = self.builder.tcx;
let place_ty = place_ref.ty(body, tcx).ty;
- match place_ty.kind() {
- ty::Ref(..) | ty::RawPtr(..) => {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- BorrowedContent { target_place: place_ref.project_deeper(&[elem], tcx) },
- ));
- }
- ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- InteriorOfTypeWithDestructor { container_ty: place_ty },
- ));
- }
- ty::Adt(adt, _) if adt.is_union() => {
- union_path.get_or_insert(base);
- }
- ty::Slice(_) => {
- return Err(MoveError::cannot_move_out_of(
- self.loc,
- InteriorOfSliceOrArray {
- ty: place_ty,
- is_index: matches!(elem, ProjectionElem::Index(..)),
- },
- ));
+ match elem {
+ ProjectionElem::Deref => match place_ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ BorrowedContent {
+ target_place: place_ref.project_deeper(&[elem], tcx),
+ },
+ ));
+ }
+ ty::Adt(adt, _) => {
+ if !adt.is_box() {
+ bug!("Adt should be a box type when Place is deref");
+ }
+ }
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::FnDef(_, _)
+ | ty::FnPtr(_)
+ | ty::Dynamic(_, _, _)
+ | ty::Closure(_, _)
+ | ty::Generator(_, _, _)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Alias(_, _)
+ | ty::Param(_)
+ | ty::Bound(_, _)
+ | ty::Infer(_)
+ | ty::Error(_)
+ | ty::Placeholder(_) => {
+                    bug!("When Place is a Deref, its type shouldn't be {place_ty:#?}")
+ }
+ },
+ ProjectionElem::Field(_, _) => match place_ty.kind() {
+ ty::Adt(adt, _) => {
+ if adt.has_dtor(tcx) {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfTypeWithDestructor { container_ty: place_ty },
+ ));
+ }
+ if adt.is_union() {
+ union_path.get_or_insert(base);
+ }
+ }
+ ty::Closure(_, _) | ty::Generator(_, _, _) | ty::Tuple(_) => (),
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnDef(_, _)
+ | ty::FnPtr(_)
+ | ty::Dynamic(_, _, _)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Alias(_, _)
+ | ty::Param(_)
+ | ty::Bound(_, _)
+ | ty::Infer(_)
+ | ty::Error(_)
+ | ty::Placeholder(_) => bug!(
+                    "When Place contains ProjectionElem::Field, its type shouldn't be {place_ty:#?}"
+ ),
+ },
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {
+ match place_ty.kind() {
+ ty::Slice(_) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfSliceOrArray {
+ ty: place_ty,
+ is_index: matches!(elem, ProjectionElem::Index(..)),
+ },
+ ));
+ }
+ ty::Array(_, _) => (),
+                    _ => bug!("Unexpected type {place_ty:#?}"),
+ }
}
-
- ty::Array(..) => {
- if let ProjectionElem::Index(..) = elem {
+ ProjectionElem::Index(_) => match place_ty.kind() {
+ ty::Array(..) => {
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfSliceOrArray { ty: place_ty, is_index: true },
));
}
- }
-
- _ => {}
- };
-
+ ty::Slice(_) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfSliceOrArray {
+ ty: place_ty,
+ is_index: matches!(elem, ProjectionElem::Index(..)),
+ },
+ ));
+ }
+ _ => bug!("Unexpected type {place_ty:#?}"),
+ },
+            // `OpaqueCast`: Only transmutes the type, so no moves there.
+            // `Downcast`  : Only changes information about a `Place` without moving.
+            // `Subtype`   : Only transmutes the type, so no moves there either.
+            // So it's safe to skip these.
+ ProjectionElem::OpaqueCast(_)
+ | ProjectionElem::Subtype(_)
+ | ProjectionElem::Downcast(_, _) => (),
+ }
if union_path.is_none() {
// inlined from add_move_path because of a borrowck conflict with the iterator
base =
@@ -370,8 +452,8 @@ impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
// this that could possibly access the return place, this doesn't
// need recording.
| TerminatorKind::Return
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::GeneratorDrop
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. } => {}
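
The large hunk above turns one flat match on the place type into a nested match that dispatches on the projection element first, with `bug!` on combinations that should be impossible. The following compressed analogue shows that shape in isolation; `Proj`, `Ty`, and `MoveErr` are invented names, not the compiler's.

```rust
#[derive(Debug)]
enum Proj {
    Deref,
    Field,
    Index,
}

#[derive(Debug)]
enum Ty {
    Ref,
    Box,
    Adt,
    Slice,
    Array,
}

#[derive(Debug)]
enum MoveErr {
    BorrowedContent,
    InteriorOfSliceOrArray,
}

// Dispatch on the projection kind first, then on the base type; anything else
// is an internal error rather than a silently accepted case.
fn check_move(elem: &Proj, ty: &Ty) -> Result<(), MoveErr> {
    match elem {
        Proj::Deref => match ty {
            Ty::Ref => Err(MoveErr::BorrowedContent),
            Ty::Box => Ok(()),
            other => panic!("deref of unexpected type {other:?}"),
        },
        Proj::Field => match ty {
            Ty::Adt => Ok(()),
            other => panic!("field projection on unexpected type {other:?}"),
        },
        Proj::Index => match ty {
            Ty::Slice | Ty::Array => Err(MoveErr::InteriorOfSliceOrArray),
            other => panic!("indexing into unexpected type {other:?}"),
        },
    }
}

fn main() {
    println!("{:?}", check_move(&Proj::Deref, &Ty::Box));
    println!("{:?}", check_move(&Proj::Deref, &Ty::Ref));
    println!("{:?}", check_move(&Proj::Field, &Ty::Adt));
    println!("{:?}", check_move(&Proj::Index, &Ty::Slice));
    println!("{:?}", check_move(&Proj::Index, &Ty::Array));
}
```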
diff --git a/compiler/rustc_mir_dataflow/src/rustc_peek.rs b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
index 775c522b4..1ebb59b3a 100644
--- a/compiler/rustc_mir_dataflow/src/rustc_peek.rs
+++ b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
@@ -190,7 +190,7 @@ impl PeekCall {
if let mir::TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
&terminator.kind
{
- if let ty::FnDef(def_id, fn_args) = *func.literal.ty().kind() {
+ if let ty::FnDef(def_id, fn_args) = *func.const_.ty().kind() {
let name = tcx.item_name(def_id);
if !tcx.is_intrinsic(def_id) || name != sym::rustc_peek {
return None;
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index 766e0257e..83766f311 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -225,7 +225,7 @@ pub trait ValueAnalysis<'tcx> {
fn handle_constant(
&self,
- constant: &Constant<'tcx>,
+ constant: &ConstOperand<'tcx>,
state: &mut State<Self::Value>,
) -> Self::Value {
self.super_constant(constant, state)
@@ -233,7 +233,7 @@ pub trait ValueAnalysis<'tcx> {
fn super_constant(
&self,
- _constant: &Constant<'tcx>,
+ _constant: &ConstOperand<'tcx>,
_state: &mut State<Self::Value>,
) -> Self::Value {
Self::Value::TOP
@@ -269,8 +269,8 @@ pub trait ValueAnalysis<'tcx> {
return self.handle_switch_int(discr, targets, state);
}
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Assert { .. }
@@ -532,7 +532,7 @@ impl<V: Clone + HasTop + HasBottom> State<V> {
/// places that are non-overlapping or identical.
///
/// The target place must have been flooded before calling this method.
- fn insert_place_idx(&mut self, target: PlaceIndex, source: PlaceIndex, map: &Map) {
+ pub fn insert_place_idx(&mut self, target: PlaceIndex, source: PlaceIndex, map: &Map) {
let StateData::Reachable(values) = &mut self.0 else { return };
// If both places are tracked, we copy the value to the target.
@@ -581,6 +581,14 @@ impl<V: Clone + HasTop + HasBottom> State<V> {
}
}
+ /// Retrieve the value stored for a place, or ⊤ if it is not tracked.
+ pub fn get_len(&self, place: PlaceRef<'_>, map: &Map) -> V {
+ match map.find_len(place) {
+ Some(place) => self.get_idx(place, map),
+ None => V::TOP,
+ }
+ }
+
/// Retrieve the value stored for a place index, or ⊤ if it is not tracked.
pub fn get_idx(&self, place: PlaceIndex, map: &Map) -> V {
match &self.0 {
@@ -626,45 +634,36 @@ pub struct Map {
}
impl Map {
- fn new() -> Self {
- Self {
+ /// Returns a map that only tracks places whose type has scalar layout.
+ ///
+ /// This is currently the only way to create a [`Map`]. The way in which the tracked places are
+    /// chosen is an implementation detail and may not be relied upon (other than that their types
+    /// are scalars).
+ pub fn new<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, value_limit: Option<usize>) -> Self {
+ let mut map = Self {
locals: IndexVec::new(),
projections: FxHashMap::default(),
places: IndexVec::new(),
value_count: 0,
inner_values: IndexVec::new(),
inner_values_buffer: Vec::new(),
- }
- }
-
- /// Returns a map that only tracks places whose type passes the filter.
- ///
- /// This is currently the only way to create a [`Map`]. The way in which the tracked places are
- /// chosen is an implementation detail and may not be relied upon (other than that their type
- /// passes the filter).
- pub fn from_filter<'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &Body<'tcx>,
- filter: impl Fn(Ty<'tcx>) -> bool,
- value_limit: Option<usize>,
- ) -> Self {
- let mut map = Self::new();
+ };
let exclude = excluded_locals(body);
- map.register_with_filter(tcx, body, filter, exclude, value_limit);
+ map.register(tcx, body, exclude, value_limit);
debug!("registered {} places ({} nodes in total)", map.value_count, map.places.len());
map
}
- /// Register all non-excluded places that pass the filter.
- fn register_with_filter<'tcx>(
+ /// Register all non-excluded places that have scalar layout.
+ fn register<'tcx>(
&mut self,
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
- filter: impl Fn(Ty<'tcx>) -> bool,
exclude: BitSet<Local>,
value_limit: Option<usize>,
) {
let mut worklist = VecDeque::with_capacity(value_limit.unwrap_or(body.local_decls.len()));
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
// Start by constructing the places for each bare local.
self.locals = IndexVec::from_elem(None, &body.local_decls);
@@ -679,7 +678,7 @@ impl Map {
self.locals[local] = Some(place);
// And push the eventual children places to the worklist.
- self.register_children(tcx, place, decl.ty, &filter, &mut worklist);
+ self.register_children(tcx, param_env, place, decl.ty, &mut worklist);
}
// `place.elem1.elem2` with type `ty`.
@@ -702,7 +701,7 @@ impl Map {
}
// And push the eventual children places to the worklist.
- self.register_children(tcx, place, ty, &filter, &mut worklist);
+ self.register_children(tcx, param_env, place, ty, &mut worklist);
}
// Pre-compute the tree of ValueIndex nested in each PlaceIndex.
@@ -732,42 +731,54 @@ impl Map {
fn register_children<'tcx>(
&mut self,
tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
place: PlaceIndex,
ty: Ty<'tcx>,
- filter: &impl Fn(Ty<'tcx>) -> bool,
worklist: &mut VecDeque<(PlaceIndex, Option<TrackElem>, TrackElem, Ty<'tcx>)>,
) {
// Allocate a value slot if it doesn't have one, and the user requested one.
- if self.places[place].value_index.is_none() && filter(ty) {
+ assert!(self.places[place].value_index.is_none());
+ if tcx.layout_of(param_env.and(ty)).map_or(false, |layout| layout.abi.is_scalar()) {
self.places[place].value_index = Some(self.value_count.into());
self.value_count += 1;
}
// For enums, directly create the `Discriminant`, as that's their main use.
if ty.is_enum() {
- let discr_ty = ty.discriminant_ty(tcx);
- if filter(discr_ty) {
- let discr = *self
- .projections
- .entry((place, TrackElem::Discriminant))
- .or_insert_with(|| {
- // Prepend new child to the linked list.
- let next = self.places.push(PlaceInfo::new(Some(TrackElem::Discriminant)));
- self.places[next].next_sibling = self.places[place].first_child;
- self.places[place].first_child = Some(next);
- next
- });
-
- // Allocate a value slot if it doesn't have one.
- if self.places[discr].value_index.is_none() {
- self.places[discr].value_index = Some(self.value_count.into());
- self.value_count += 1;
- }
- }
+ // Prepend new child to the linked list.
+ let discr = self.places.push(PlaceInfo::new(Some(TrackElem::Discriminant)));
+ self.places[discr].next_sibling = self.places[place].first_child;
+ self.places[place].first_child = Some(discr);
+ let old = self.projections.insert((place, TrackElem::Discriminant), discr);
+ assert!(old.is_none());
+
+ // Allocate a value slot since it doesn't have one.
+ assert!(self.places[discr].value_index.is_none());
+ self.places[discr].value_index = Some(self.value_count.into());
+ self.value_count += 1;
+ }
+
+ if let ty::Ref(_, ref_ty, _) | ty::RawPtr(ty::TypeAndMut { ty: ref_ty, .. }) = ty.kind()
+ && let ty::Slice(..) = ref_ty.kind()
+ {
+ assert!(self.places[place].value_index.is_none(), "slices are not scalars");
+
+ // Prepend new child to the linked list.
+ let len = self.places.push(PlaceInfo::new(Some(TrackElem::DerefLen)));
+ self.places[len].next_sibling = self.places[place].first_child;
+ self.places[place].first_child = Some(len);
+
+ let old = self.projections.insert((place, TrackElem::DerefLen), len);
+ assert!(old.is_none());
+
+ // Allocate a value slot since it doesn't have one.
+            assert!(self.places[len].value_index.is_none());
+ self.places[len].value_index = Some(self.value_count.into());
+ self.value_count += 1;
}
// Recurse with all fields of this place.
- iter_fields(ty, tcx, ty::ParamEnv::reveal_all(), |variant, field, ty| {
+ iter_fields(ty, tcx, param_env, |variant, field, ty| {
worklist.push_back((
place,
variant.map(TrackElem::Variant),
@@ -834,6 +845,11 @@ impl Map {
self.find_extra(place, [TrackElem::Discriminant])
}
+ /// Locates the given place and applies `DerefLen`, if it exists in the tree.
+ pub fn find_len(&self, place: PlaceRef<'_>) -> Option<PlaceIndex> {
+ self.find_extra(place, [TrackElem::DerefLen])
+ }
+
/// Iterate over all direct children.
pub fn children(&self, parent: PlaceIndex) -> impl Iterator<Item = PlaceIndex> + '_ {
Children::new(self, parent)
@@ -914,6 +930,31 @@ impl Map {
f(v)
}
}
+
+ /// Invoke a function on each value in the given place and all descendants.
+ pub fn for_each_projection_value<O>(
+ &self,
+ root: PlaceIndex,
+ value: O,
+ project: &mut impl FnMut(TrackElem, &O) -> Option<O>,
+ f: &mut impl FnMut(PlaceIndex, &O),
+ ) {
+        // Fast path if there is nothing to do.
+ if self.inner_values[root].is_empty() {
+ return;
+ }
+
+ if self.places[root].value_index.is_some() {
+ f(root, &value)
+ }
+
+ for child in self.children(root) {
+ let elem = self.places[child].proj_elem.unwrap();
+ if let Some(value) = project(elem, &value) {
+ self.for_each_projection_value(child, value, project, f);
+ }
+ }
+ }
}
/// This is the information tracked for every [`PlaceIndex`] and is stored by [`Map`].
@@ -985,6 +1026,8 @@ pub enum TrackElem {
Field(FieldIdx),
Variant(VariantIdx),
Discriminant,
+ // Length of a slice.
+ DerefLen,
}
impl<V, T> TryFrom<ProjectionElem<V, T>> for TrackElem {
@@ -1124,6 +1167,9 @@ fn debug_with_context_rec<V: Debug + Eq>(
format!("{}.{}", place_str, field.index())
}
}
+ TrackElem::DerefLen => {
+ format!("Len(*{})", place_str)
+ }
};
debug_with_context_rec(child, &child_place_str, new, old, map, f)?;
}
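
The new `for_each_projection_value` above is a depth-first walk that threads an accumulator through `project` on each edge and calls `f` only on nodes that carry a value. Here is a self-contained miniature of the same traversal pattern, with invented `Node`/`Elem` types in place of the real place tree.

```rust
#[derive(Clone, Copy, Debug)]
enum Elem {
    Field(usize),
    Discriminant,
}

struct Node {
    value: Option<u32>,
    children: Vec<(Elem, Node)>,
}

// Walk the tree, projecting `acc` across each edge; visit only value-carrying
// nodes. A `None` from `project` prunes that subtree.
fn for_each_projection_value<O>(
    node: &Node,
    acc: O,
    project: &mut impl FnMut(Elem, &O) -> Option<O>,
    f: &mut impl FnMut(&Node, &O),
) {
    if node.value.is_some() {
        f(node, &acc);
    }
    for (elem, child) in &node.children {
        if let Some(acc) = project(*elem, &acc) {
            for_each_projection_value(child, acc, project, f);
        }
    }
}

fn main() {
    let tree = Node {
        value: Some(1),
        children: vec![
            (Elem::Field(0), Node { value: Some(2), children: vec![] }),
            (Elem::Discriminant, Node { value: None, children: vec![] }),
        ],
    };
    for_each_projection_value(
        &tree,
        String::from("_1"),
        &mut |elem, path| Some(format!("{path}.{elem:?}")),
        &mut |node, path| println!("{path} = {:?}", node.value),
    );
}
```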
diff --git a/compiler/rustc_mir_transform/messages.ftl b/compiler/rustc_mir_transform/messages.ftl
index 2598eb2ed..5a99afc45 100644
--- a/compiler/rustc_mir_transform/messages.ftl
+++ b/compiler/rustc_mir_transform/messages.ftl
@@ -42,8 +42,6 @@ mir_transform_requires_unsafe = {$details} is unsafe and requires unsafe {$op_in
}
.not_inherited = items do not inherit unsafety from separate enclosing items
-mir_transform_simd_shuffle_last_const = last argument of `simd_shuffle` is required to be a `const` item
-
mir_transform_target_feature_call_label = call to function with `#[target_feature]`
mir_transform_target_feature_call_note = can only be called if the required target features are available
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 5aed89139..4500bb7ff 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -104,7 +104,7 @@ impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
for id in calls_to_terminate {
let cleanup = body.basic_blocks_mut()[id].terminator_mut().unwind_mut().unwrap();
- *cleanup = UnwindAction::Terminate;
+ *cleanup = UnwindAction::Terminate(UnwindTerminateReason::Abi);
}
for id in cleanups_to_remove {
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
index fb4705e07..b814fbf32 100644
--- a/compiler/rustc_mir_transform/src/add_call_guards.rs
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -53,8 +53,10 @@ impl AddCallGuards {
kind: TerminatorKind::Call { target: Some(ref mut destination), unwind, .. },
source_info,
}) if pred_count[*destination] > 1
- && (matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate)
- || self == &AllCallEdges) =>
+ && (matches!(
+ unwind,
+ UnwindAction::Cleanup(_) | UnwindAction::Terminate(_)
+ ) || self == &AllCallEdges) =>
{
// It's a critical edge, break it
let call_guard = BasicBlockData {
diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
new file mode 100644
index 000000000..e5be7c0ca
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
@@ -0,0 +1,70 @@
+use crate::MirPass;
+use rustc_index::IndexVec;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct Subtyper;
+
+pub struct SubTypeChecker<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ patcher: MirPatch<'tcx>,
+ local_decls: &'a IndexVec<Local, LocalDecl<'tcx>>,
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: &mut Place<'tcx>,
+ rvalue: &mut Rvalue<'tcx>,
+ location: Location,
+ ) {
+ // We don't need to do anything for deref temps as they are
+ // not part of the source code, but used for desugaring purposes.
+ if self.local_decls[place.local].is_deref_temp() {
+ return;
+ }
+ let mut place_ty = place.ty(self.local_decls, self.tcx).ty;
+ let mut rval_ty = rvalue.ty(self.local_decls, self.tcx);
+        // Not erasing this causes `Free Regions` errors in the validator
+        // when rval is `ReStatic`.
+ rval_ty = self.tcx.erase_regions_ty(rval_ty);
+ place_ty = self.tcx.erase_regions(place_ty);
+ if place_ty != rval_ty {
+ let temp = self
+ .patcher
+ .new_temp(rval_ty, self.local_decls[place.as_ref().local].source_info.span);
+ let new_place = Place::from(temp);
+ self.patcher.add_assign(location, new_place, rvalue.clone());
+ let subtyped = new_place.project_deeper(&[ProjectionElem::Subtype(place_ty)], self.tcx);
+ *rvalue = Rvalue::Use(Operand::Move(subtyped));
+ }
+ }
+}
+
+// The aim here is to do this kind of transformation:
+//
+// let place: place_ty = rval;
+// // gets transformed to
+// let temp: rval_ty = rval;
+// let place: place_ty = temp as place_ty;
+pub fn subtype_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let patch = MirPatch::new(body);
+ let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls };
+
+ for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() {
+ checker.visit_basic_block_data(bb, data);
+ }
+ checker.patcher.apply(body);
+}
+
+impl<'tcx> MirPass<'tcx> for Subtyper {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ subtype_finder(tcx, body);
+ }
+}
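
For orientation only: the situation this new pass targets is an assignment whose right-hand side has a strict subtype of the destination's type (typically differing only in lifetimes), which MIR now expresses through an explicit `Subtype` projection instead of requiring an exact type match. The surface-Rust snippet below merely illustrates such a subtyping coercion; it is not compiler code.

```rust
fn pick<'a>(s: &'a str) -> &'a str {
    // `&'static str` is a subtype of `&'a str`; assigning it to `place` is the
    // shape of `place = rval` that the pass splits into
    // `temp = rval; place = temp as place_ty` at the MIR level.
    let place: &'a str = "fallback";
    if s.is_empty() { place } else { s }
}

fn main() {
    println!("{}", pick(""));
    println!("{}", pick("hello"));
}
```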
diff --git a/compiler/rustc_mir_transform/src/check_alignment.rs b/compiler/rustc_mir_transform/src/check_alignment.rs
index 4892ace53..28765af20 100644
--- a/compiler/rustc_mir_transform/src/check_alignment.rs
+++ b/compiler/rustc_mir_transform/src/check_alignment.rs
@@ -4,7 +4,7 @@ use rustc_hir::lang_items::LangItem;
use rustc_index::IndexVec;
use rustc_middle::mir::*;
use rustc_middle::mir::{
- interpret::{ConstValue, Scalar},
+ interpret::Scalar,
visit::{PlaceContext, Visitor},
};
use rustc_middle::ty::{Ty, TyCtxt, TypeAndMut};
@@ -181,13 +181,10 @@ fn insert_alignment_check<'tcx>(
// Subtract 1 from the alignment to get the alignment mask
let alignment_mask =
local_decls.push(LocalDecl::with_source_info(tcx.types.usize, source_info)).into();
- let one = Operand::Constant(Box::new(Constant {
+ let one = Operand::Constant(Box::new(ConstOperand {
span: source_info.span,
user_ty: None,
- literal: ConstantKind::Val(
- ConstValue::Scalar(Scalar::from_target_usize(1, &tcx)),
- tcx.types.usize,
- ),
+ const_: Const::Val(ConstValue::Scalar(Scalar::from_target_usize(1, &tcx)), tcx.types.usize),
}));
block_data.statements.push(Statement {
source_info,
@@ -213,13 +210,10 @@ fn insert_alignment_check<'tcx>(
// Check if the alignment bits are all zero
let is_ok = local_decls.push(LocalDecl::with_source_info(tcx.types.bool, source_info)).into();
- let zero = Operand::Constant(Box::new(Constant {
+ let zero = Operand::Constant(Box::new(ConstOperand {
span: source_info.span,
user_ty: None,
- literal: ConstantKind::Val(
- ConstValue::Scalar(Scalar::from_target_usize(0, &tcx)),
- tcx.types.usize,
- ),
+ const_: Const::Val(ConstValue::Scalar(Scalar::from_target_usize(0, &tcx)), tcx.types.usize),
}));
block_data.statements.push(Statement {
source_info,
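
The MIR built above encodes the usual mask trick: an address is aligned to a power-of-two `align` exactly when `addr & (align - 1) == 0`, which is why the pass materializes the constants 1 and 0. The same check in plain Rust, shown only for reference:

```rust
fn is_aligned(addr: usize, align: usize) -> bool {
    debug_assert!(align.is_power_of_two());
    // Subtracting 1 from a power of two yields the alignment mask.
    addr & (align - 1) == 0
}

fn main() {
    let x: u64 = 0;
    let addr = &x as *const u64 as usize;
    println!("{}", is_aligned(addr, std::mem::align_of::<u64>())); // true
    println!("{}", is_aligned(0x1003, 4)); // false
}
```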
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index 58e9786ec..bacabc62e 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -57,8 +57,8 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
| TerminatorKind::Yield { .. }
| TerminatorKind::Assert { .. }
| TerminatorKind::GeneratorDrop
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::FalseEdge { .. }
@@ -142,9 +142,9 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
if let Operand::Constant(constant) = op {
- let maybe_uneval = match constant.literal {
- ConstantKind::Val(..) | ConstantKind::Ty(_) => None,
- ConstantKind::Unevaluated(uv, _) => Some(uv),
+ let maybe_uneval = match constant.const_ {
+ Const::Val(..) | Const::Ty(_) => None,
+ Const::Unevaluated(uv, _) => Some(uv),
};
if let Some(uv) = maybe_uneval {
@@ -483,7 +483,7 @@ fn unsafety_check_result(tcx: TyCtxt<'_>, def: LocalDefId) -> &UnsafetyCheckResu
// `mir_built` force this.
let body = &tcx.mir_built(def).borrow();
- if body.is_custom_mir() {
+ if body.is_custom_mir() || body.tainted_by_errors.is_some() {
return tcx.arena.alloc(UnsafetyCheckResult {
violations: Vec::new(),
used_unsafe_blocks: Default::default(),
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
index d435d3ee6..5b4bc4fa1 100644
--- a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
+++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
@@ -4,13 +4,13 @@
//!
//! - [`AscribeUserType`]
//! - [`FakeRead`]
-//! - [`Assign`] statements with a [`Shallow`] borrow
+//! - [`Assign`] statements with a [`Fake`] borrow
//!
//! [`AscribeUserType`]: rustc_middle::mir::StatementKind::AscribeUserType
//! [`Assign`]: rustc_middle::mir::StatementKind::Assign
//! [`FakeRead`]: rustc_middle::mir::StatementKind::FakeRead
//! [`Nop`]: rustc_middle::mir::StatementKind::Nop
-//! [`Shallow`]: rustc_middle::mir::BorrowKind::Shallow
+//! [`Fake`]: rustc_middle::mir::BorrowKind::Fake
use crate::MirPass;
use rustc_middle::mir::{Body, BorrowKind, Rvalue, StatementKind, TerminatorKind};
@@ -24,7 +24,7 @@ impl<'tcx> MirPass<'tcx> for CleanupPostBorrowck {
for statement in basic_block.statements.iter_mut() {
match statement.kind {
StatementKind::AscribeUserType(..)
- | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Shallow, _)))
+ | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Fake, _)))
| StatementKind::FakeRead(..) => statement.make_nop(),
_ => (),
}
diff --git a/compiler/rustc_mir_transform/src/const_debuginfo.rs b/compiler/rustc_mir_transform/src/const_debuginfo.rs
index f662ce645..40cd28254 100644
--- a/compiler/rustc_mir_transform/src/const_debuginfo.rs
+++ b/compiler/rustc_mir_transform/src/const_debuginfo.rs
@@ -4,7 +4,7 @@
use rustc_middle::{
mir::{
visit::{PlaceContext, Visitor},
- Body, Constant, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
+ Body, ConstOperand, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
},
ty::TyCtxt,
};
@@ -45,7 +45,7 @@ struct LocalUseVisitor {
local_assignment_locations: IndexVec<Local, Option<Location>>,
}
-fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, Constant<'tcx>)> {
+fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, ConstOperand<'tcx>)> {
let mut visitor = LocalUseVisitor {
local_mutating_uses: IndexVec::from_elem(0, &body.local_decls),
local_assignment_locations: IndexVec::from_elem(None, &body.local_decls),
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
index e175f22d7..fd2d37dbe 100644
--- a/compiler/rustc_mir_transform/src/const_goto.rs
+++ b/compiler/rustc_mir_transform/src/const_goto.rs
@@ -96,10 +96,10 @@ impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
let (discr, targets) = target_bb_terminator.kind.as_switch()?;
if discr.place() == Some(*place) {
let switch_ty = place.ty(self.body.local_decls(), self.tcx).ty;
+ debug_assert_eq!(switch_ty, _const.ty());
// We now know that the Switch matches on the const place, and it is statementless
// Now find which value in the Switch matches the const value.
- let const_value =
- _const.literal.try_eval_bits(self.tcx, self.param_env, switch_ty)?;
+ let const_value = _const.const_.try_eval_bits(self.tcx, self.param_env)?;
let target_to_use_in_goto = targets.target_for_value(const_value);
self.optimizations.push(OptimizationToApply {
bb_with_goto: location.block,
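
The changed line above feeds a known constant into `targets.target_for_value`: once the discriminant is a compile-time constant, the switch collapses to a single goto. Below is a toy stand-in for that lookup; the `Terminator` enum and block names are invented for the sketch.

```rust
// Once the discriminant is a known constant, the SwitchInt target is static.
enum Terminator {
    SwitchInt {
        discr: u128,
        targets: &'static [(u128, &'static str)],
        otherwise: &'static str,
    },
}

fn target_for_value(t: Terminator) -> &'static str {
    match t {
        Terminator::SwitchInt { discr, targets, otherwise } => targets
            .iter()
            .find(|(value, _)| *value == discr)
            .map(|(_, block)| *block)
            .unwrap_or(otherwise),
    }
}

fn main() {
    let t = Terminator::SwitchInt {
        discr: 1,
        targets: &[(0, "bb1"), (1, "bb2")],
        otherwise: "bb3",
    };
    println!("{}", target_for_value(t)); // bb2
}
```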
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 7529ed818..50443e739 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -15,15 +15,15 @@ use rustc_middle::mir::visit::{
use rustc_middle::mir::*;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
-use rustc_span::{def_id::DefId, Span, DUMMY_SP};
+use rustc_span::{def_id::DefId, Span};
use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi as CallAbi;
+use crate::dataflow_const_prop::Patch;
use crate::MirPass;
use rustc_const_eval::interpret::{
- self, compile_time_machine, AllocId, ConstAllocation, ConstValue, FnArg, Frame, ImmTy,
- Immediate, InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, PlaceTy, Pointer, Scalar,
- StackPopCleanup,
+ self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, Immediate, InterpCx,
+ InterpResult, MemoryKind, OpTy, PlaceTy, Pointer, Scalar, StackPopCleanup,
};
/// The maximum number of bytes that we'll allocate space for a local or the return value.
@@ -33,32 +33,30 @@ const MAX_ALLOC_LIMIT: u64 = 1024;
/// Macro for machine-specific `InterpError` without allocation.
/// (These will never be shown to the user, but they help diagnose ICEs.)
-macro_rules! throw_machine_stop_str {
- ($($tt:tt)*) => {{
- // We make a new local type for it. The type itself does not carry any information,
- // but its vtable (for the `MachineStopType` trait) does.
- #[derive(Debug)]
- struct Zst;
- // Printing this type shows the desired string.
- impl std::fmt::Display for Zst {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, $($tt)*)
- }
+pub(crate) macro throw_machine_stop_str($($tt:tt)*) {{
+ // We make a new local type for it. The type itself does not carry any information,
+ // but its vtable (for the `MachineStopType` trait) does.
+ #[derive(Debug)]
+ struct Zst;
+ // Printing this type shows the desired string.
+ impl std::fmt::Display for Zst {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, $($tt)*)
}
+ }
- impl rustc_middle::mir::interpret::MachineStopType for Zst {
- fn diagnostic_message(&self) -> rustc_errors::DiagnosticMessage {
- self.to_string().into()
- }
-
- fn add_args(
- self: Box<Self>,
- _: &mut dyn FnMut(std::borrow::Cow<'static, str>, rustc_errors::DiagnosticArgValue<'static>),
- ) {}
+ impl rustc_middle::mir::interpret::MachineStopType for Zst {
+ fn diagnostic_message(&self) -> rustc_errors::DiagnosticMessage {
+ self.to_string().into()
}
- throw_machine_stop!(Zst)
- }};
-}
+
+ fn add_args(
+ self: Box<Self>,
+ _: &mut dyn FnMut(std::borrow::Cow<'static, str>, rustc_errors::DiagnosticArgValue<'static>),
+ ) {}
+ }
+ throw_machine_stop!(Zst)
+}}
pub struct ConstProp;
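
The hunk above rewrites a `macro_rules!` definition into a `macro` item (declarative macros 2.0), which can carry ordinary visibility such as `pub(crate)`. A minimal sketch of that item form follows; note that this syntax is nightly-only behind `#![feature(decl_macro)]`, and the macro body here is invented rather than taken from the compiler.

```rust
#![feature(decl_macro)]

// A 2.0-style declarative macro is an item with normal visibility rules.
pub(crate) macro stop_with_message($($tt:tt)*) {{
    // The double braces make the expansion a block expression, mirroring the
    // `throw_machine_stop_str!` conversion above.
    let msg: String = format!($($tt)*);
    msg
}}

fn main() {
    let m = stop_with_message!("unsupported operation at offset {}", 8);
    println!("{m}");
}
```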
@@ -86,9 +84,9 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
return;
}
- let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
// FIXME(welseywiser) const prop doesn't work on generators because of query cycles
// computing their layout.
+ let is_generator = def_kind == DefKind::Generator;
if is_generator {
trace!("ConstProp skipped for generator {:?}", def_id);
return;
@@ -96,33 +94,22 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
trace!("ConstProp starting for {:?}", def_id);
- let dummy_body = &Body::new(
- body.source,
- (*body.basic_blocks).to_owned(),
- body.source_scopes.clone(),
- body.local_decls.clone(),
- Default::default(),
- body.arg_count,
- Default::default(),
- body.span,
- body.generator_kind(),
- body.tainted_by_errors,
- );
-
// FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
// constants, instead of just checking for const-folding succeeding.
// That would require a uniform one-def no-mutation analysis
// and RPO (or recursing when needing the value of a local).
- let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
+ let mut optimization_finder = ConstPropagator::new(body, tcx);
// Traverse the body in reverse post-order, to ensure that `FullConstProp` locals are
// assigned before being read.
- let rpo = body.basic_blocks.reverse_postorder().to_vec();
- for bb in rpo {
- let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb];
+ for &bb in body.basic_blocks.reverse_postorder() {
+ let data = &body.basic_blocks[bb];
optimization_finder.visit_basic_block_data(bb, data);
}
+ let mut patch = optimization_finder.patch;
+ patch.visit_body_preserves_cfg(body);
+
trace!("ConstProp done for {:?}", def_id);
}
}
@@ -146,14 +133,17 @@ impl ConstPropMachine<'_, '_> {
impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> {
compile_time_machine!(<'mir, 'tcx>);
+
const PANIC_ON_ALLOC_FAIL: bool = true; // all allocations are small (see `MAX_ALLOC_LIMIT`)
+ const POST_MONO_CHECKS: bool = false; // this MIR is still generic!
+
type MemoryKind = !;
#[inline(always)]
fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
// We do not check for alignment to avoid having to carry an `Align`
- // in `ConstValue::ByRef`.
+ // in `ConstValue::Indirect`.
CheckAlignment::No
}
@@ -180,6 +170,10 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
throw_machine_stop_str!("calling functions isn't supported in ConstProp")
}
+ fn panic_nounwind(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: &str) -> InterpResult<'tcx> {
+ throw_machine_stop_str!("panicking isn't supported in ConstProp")
+ }
+
fn find_mir_or_eval_fn(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
@@ -216,16 +210,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
_bin_op: BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
- ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ ) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
// We can't do this because aliasing of memory can differ between const eval and llvm
throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
}
- fn access_local_mut<'a>(
+ fn before_access_local_mut<'a>(
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
frame: usize,
local: Local,
- ) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::Provenance>> {
+ ) -> InterpResult<'tcx> {
assert_eq!(frame, 0);
match ecx.machine.can_const_prop[local] {
ConstPropMode::NoPropagation => {
@@ -238,7 +232,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
}
ConstPropMode::FullConstProp => {}
}
- ecx.machine.stack[frame].locals[local].access_mut()
+ Ok(())
}
fn before_access_global(
@@ -298,6 +292,7 @@ struct ConstPropagator<'mir, 'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
local_decls: &'mir IndexSlice<Local, LocalDecl<'tcx>>,
+ patch: Patch<'tcx>,
}
impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> {
@@ -331,11 +326,7 @@ impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
}
impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
- fn new(
- body: &Body<'tcx>,
- dummy_body: &'mir Body<'tcx>,
- tcx: TyCtxt<'tcx>,
- ) -> ConstPropagator<'mir, 'tcx> {
+ fn new(body: &'mir Body<'tcx>, tcx: TyCtxt<'tcx>) -> ConstPropagator<'mir, 'tcx> {
let def_id = body.source.def_id();
let args = &GenericArgs::identity_for_item(tcx, def_id);
let param_env = tcx.param_env_reveal_all_normalized(def_id);
@@ -366,19 +357,33 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
ecx.push_stack_frame(
Instance::new(def_id, args),
- dummy_body,
+ body,
&ret,
StackPopCleanup::Root { cleanup: false },
)
.expect("failed to push initial stack frame");
- ConstPropagator { ecx, tcx, param_env, local_decls: &dummy_body.local_decls }
+ for local in body.local_decls.indices() {
+ // Mark everything initially live.
+ // This is somewhat dicey since some of them might be unsized and it is incoherent to
+ // mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter
+ // stopping us before those unsized immediates can cause issues deeper in the
+ // interpreter.
+ ecx.frame_mut().locals[local].make_live_uninit();
+ }
+
+ let patch = Patch::new(tcx);
+ ConstPropagator { ecx, tcx, param_env, local_decls: &body.local_decls, patch }
}
fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
let op = match self.ecx.eval_place_to_op(place, None) {
Ok(op) => {
- if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+ if op
+ .as_mplace_or_imm()
+ .right()
+ .is_some_and(|imm| matches!(*imm, Immediate::Uninit))
+ {
// Make sure nobody accidentally uses this value.
return None;
}
@@ -401,17 +406,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
/// Remove `local` from the pool of `Locals`. Allows writing to them,
/// but not reading from them anymore.
fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
- ecx.frame_mut().locals[local].value =
- LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit));
+ ecx.frame_mut().locals[local].make_live_uninit();
ecx.machine.written_only_inside_own_block_locals.remove(&local);
}
- fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
- if let Some(place) = operand.place() && let Some(op) = self.replace_with_const(place) {
- *operand = op;
- }
- }
-
fn check_rvalue(&mut self, rvalue: &Rvalue<'tcx>) -> Option<()> {
// Perform any special handling for specific Rvalue types.
// Generally, checks here fall into one of two categories:
@@ -527,16 +525,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
}
- /// Creates a new `Operand::Constant` from a `Scalar` value
- fn operand_from_scalar(&self, scalar: Scalar, ty: Ty<'tcx>) -> Operand<'tcx> {
- Operand::Constant(Box::new(Constant {
- span: DUMMY_SP,
- user_ty: None,
- literal: ConstantKind::from_scalar(self.tcx, scalar, ty),
- }))
- }
-
- fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Operand<'tcx>> {
+ fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Const<'tcx>> {
// This will return None if the above `const_prop` invocation only "wrote" a
// type whose creation requires no write. E.g. a generator whose initial state
// consists solely of uninitialized memory (so it doesn't capture any locals).
@@ -546,31 +535,26 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
trace!("replacing {:?} with {:?}", place, value);
- // FIXME> figure out what to do when read_immediate_raw fails
+ // FIXME: figure out what to do when read_immediate_raw fails
let imm = self.ecx.read_immediate_raw(&value).ok()?;
let Right(imm) = imm else { return None };
match *imm {
Immediate::Scalar(scalar) if scalar.try_to_int().is_ok() => {
- Some(self.operand_from_scalar(scalar, value.layout.ty))
+ Some(Const::from_scalar(self.tcx, scalar, value.layout.ty))
}
Immediate::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => {
- let alloc = self
+ let alloc_id = self
.ecx
.intern_with_temp_alloc(value.layout, |ecx, dest| {
ecx.write_immediate(*imm, dest)
})
.ok()?;
- let literal = ConstantKind::Val(
- ConstValue::ByRef { alloc, offset: Size::ZERO },
+ Some(Const::Val(
+ ConstValue::Indirect { alloc_id, offset: Size::ZERO },
value.layout.ty,
- );
- Some(Operand::Constant(Box::new(Constant {
- span: DUMMY_SP,
- user_ty: None,
- literal,
- })))
+ ))
}
// Scalars or scalar pairs that contain undef values are assumed to not have
// successfully evaluated and are thus not propagated.
@@ -699,7 +683,7 @@ impl<'tcx> Visitor<'tcx> for CanConstProp {
// These can't ever be propagated under any scheme, as we can't reason about indirect
// mutation.
| NonMutatingUse(NonMutatingUseContext::SharedBorrow)
- | NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ | NonMutatingUse(NonMutatingUseContext::FakeBorrow)
| NonMutatingUse(NonMutatingUseContext::AddressOf)
| MutatingUse(MutatingUseContext::Borrow)
| MutatingUse(MutatingUseContext::AddressOf) => {
@@ -712,39 +696,29 @@ impl<'tcx> Visitor<'tcx> for CanConstProp {
}
}
-impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
- fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
+impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
self.super_operand(operand, location);
- self.propagate_operand(operand)
+ if let Some(place) = operand.place() && let Some(value) = self.replace_with_const(place) {
+ self.patch.before_effect.insert((location, place), value);
+ }
}
- fn process_projection_elem(
+ fn visit_projection_elem(
&mut self,
+ _: PlaceRef<'tcx>,
elem: PlaceElem<'tcx>,
- _: Location,
- ) -> Option<PlaceElem<'tcx>> {
+ _: PlaceContext,
+ location: Location,
+ ) {
if let PlaceElem::Index(local) = elem
- && let Some(value) = self.get_const(local.into())
- && let interpret::Operand::Immediate(Immediate::Scalar(scalar)) = *value
- && let Ok(offset) = scalar.to_target_usize(&self.tcx)
- && let Some(min_length) = offset.checked_add(1)
+ && let Some(value) = self.replace_with_const(local.into())
{
- Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false })
- } else {
- None
+ self.patch.before_effect.insert((location, local.into()), value);
}
}
- fn visit_assign(
- &mut self,
- place: &mut Place<'tcx>,
- rvalue: &mut Rvalue<'tcx>,
- location: Location,
- ) {
+ fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
self.super_assign(place, rvalue, location);
let Some(()) = self.check_rvalue(rvalue) else { return };
@@ -757,11 +731,11 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
if let Some(()) = self.eval_rvalue_with_identities(rvalue, *place) {
// If this was already an evaluated constant, keep it.
if let Rvalue::Use(Operand::Constant(c)) = rvalue
- && let ConstantKind::Val(..) = c.literal
+ && let Const::Val(..) = c.const_
{
trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
} else if let Some(operand) = self.replace_with_const(*place) {
- *rvalue = Rvalue::Use(operand);
+ self.patch.assignments.insert(location, operand);
}
} else {
// Const prop failed, so erase the destination, ensuring that whatever happens
@@ -785,7 +759,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
}
- fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
trace!("visit_statement: {:?}", statement);
// We want to evaluate operands before any change to the assigned-to value,
@@ -829,7 +803,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
}
- fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+ fn visit_basic_block_data(&mut self, block: BasicBlock, data: &BasicBlockData<'tcx>) {
self.super_basic_block_data(block, data);
// We remove all Locals which are restricted in propagation to their containing blocks and
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index ac07c2576..64e262c6c 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -7,7 +7,7 @@ use either::Left;
use rustc_const_eval::interpret::Immediate;
use rustc_const_eval::interpret::{
- self, InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, Scalar, StackPopCleanup,
+ InterpCx, InterpResult, MemoryKind, OpTy, Scalar, StackPopCleanup,
};
use rustc_const_eval::ReportErrorExt;
use rustc_hir::def::DefKind;
@@ -39,6 +39,10 @@ pub struct ConstProp;
impl<'tcx> MirLint<'tcx> for ConstProp {
fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ if body.tainted_by_errors.is_some() {
+ return;
+ }
+
// will be evaluated by miri and produce its errors there
if body.source.promoted.is_some() {
return;
@@ -101,25 +105,12 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
trace!("ConstProp starting for {:?}", def_id);
- let dummy_body = &Body::new(
- body.source,
- (*body.basic_blocks).to_owned(),
- body.source_scopes.clone(),
- body.local_decls.clone(),
- Default::default(),
- body.arg_count,
- Default::default(),
- body.span,
- body.generator_kind(),
- body.tainted_by_errors,
- );
-
// FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
// constants, instead of just checking for const-folding succeeding.
// That would require a uniform one-def no-mutation analysis
// and RPO (or recursing when needing the value of a local).
- let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
- optimization_finder.visit_body(body);
+ let mut linter = ConstPropagator::new(body, tcx);
+ linter.visit_body(body);
trace!("ConstProp done for {:?}", def_id);
}
@@ -165,11 +156,7 @@ impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
}
impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
- fn new(
- body: &Body<'tcx>,
- dummy_body: &'mir Body<'tcx>,
- tcx: TyCtxt<'tcx>,
- ) -> ConstPropagator<'mir, 'tcx> {
+ fn new(body: &'mir Body<'tcx>, tcx: TyCtxt<'tcx>) -> ConstPropagator<'mir, 'tcx> {
let def_id = body.source.def_id();
let args = &GenericArgs::identity_for_item(tcx, def_id);
let param_env = tcx.param_env_reveal_all_normalized(def_id);
@@ -200,12 +187,21 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
ecx.push_stack_frame(
Instance::new(def_id, args),
- dummy_body,
+ body,
&ret,
StackPopCleanup::Root { cleanup: false },
)
.expect("failed to push initial stack frame");
+ for local in body.local_decls.indices() {
+ // Mark everything initially live.
+ // This is somewhat dicey since some of them might be unsized and it is incoherent to
+ // mark those as live... We rely on `local_to_place`/`local_to_op` in the interpreter
+ // stopping us before those unsized immediates can cause issues deeper in the
+ // interpreter.
+ ecx.frame_mut().locals[local].make_live_uninit();
+ }
+
ConstPropagator {
ecx,
tcx,
@@ -226,7 +222,11 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
let op = match self.ecx.eval_place_to_op(place, None) {
Ok(op) => {
- if matches!(*op, interpret::Operand::Immediate(Immediate::Uninit)) {
+ if op
+ .as_mplace_or_imm()
+ .right()
+ .is_some_and(|imm| matches!(*imm, Immediate::Uninit))
+ {
// Make sure nobody accidentally uses this value.
return None;
}
@@ -249,8 +249,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
/// Remove `local` from the pool of `Locals`. Allows writing to them,
/// but not reading from them anymore.
fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
- ecx.frame_mut().locals[local].value =
- LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit));
+ ecx.frame_mut().locals[local].make_live_uninit();
ecx.machine.written_only_inside_own_block_locals.remove(&local);
}
@@ -273,7 +272,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// dedicated error variants should be introduced instead.
assert!(
!error.kind().formatted_string(),
- "const-prop encountered formatting error: {error:?}",
+ "const-prop encountered formatting error: {}",
+ self.ecx.format_error(error),
);
None
}
@@ -281,7 +281,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
/// Returns the value, if any, of evaluating `c`.
- fn eval_constant(&mut self, c: &Constant<'tcx>, location: Location) -> Option<OpTy<'tcx>> {
+ fn eval_constant(&mut self, c: &ConstOperand<'tcx>, location: Location) -> Option<OpTy<'tcx>> {
// FIXME we need to revisit this for #67176
if c.has_param() {
return None;
@@ -293,7 +293,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
// that the `RevealAll` pass has happened and that the body's consts
// are normalized, so any call to resolve before that needs to be
// manually normalized.
- let val = self.tcx.try_normalize_erasing_regions(self.param_env, c.literal).ok()?;
+ let val = self.tcx.try_normalize_erasing_regions(self.param_env, c.const_).ok()?;
self.use_ecx(location, |this| this.ecx.eval_mir_constant(&val, Some(c.span), None))
}
@@ -322,7 +322,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>, location: Location) -> Option<()> {
if let (val, true) = self.use_ecx(location, |this| {
let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
- let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
+ let (_res, overflow) = this.ecx.overflowing_unary_op(op, &val)?;
Ok((val, overflow))
})? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is
@@ -390,7 +390,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
if let (Some(l), Some(r)) = (l, r) {
// The remaining operators are handled through `overflowing_binary_op`.
if self.use_ecx(location, |this| {
- let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, &l, &r)?;
+ let (_res, overflow) = this.ecx.overflowing_binary_op(op, &l, &r)?;
Ok(overflow)
})? {
let source_info = self.body().source_info(location);
@@ -580,7 +580,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
self.super_operand(operand, location);
}
- fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, location: Location) {
trace!("visit_constant: {:?}", constant);
self.super_constant(constant, location);
self.eval_constant(constant, location);
@@ -645,12 +645,12 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
StatementKind::StorageLive(local) => {
let frame = self.ecx.frame_mut();
- frame.locals[local].value =
- LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit));
+ frame.locals[local].make_live_uninit();
}
StatementKind::StorageDead(local) => {
let frame = self.ecx.frame_mut();
- frame.locals[local].value = LocalValue::Dead;
+ // We don't actually track liveness, so the local remains live. But forget its value.
+ frame.locals[local].make_live_uninit();
}
_ => {}
}
@@ -678,8 +678,8 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
}
// None of these have Operands to const-propagate.
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
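
The lint-only ConstPropagator above now interprets the real body directly, marking every local live-but-uninitialized up front and refusing to report a constant for anything that is still uninitialized. A minimal standalone sketch of that pattern, using hypothetical types rather than rustc's interpreter internals:

    // Sketch of the "live but uninitialized" locals pattern (hypothetical types,
    // not rustc's): every local starts as Uninit, and lookups hand back a value
    // only after something has actually been written.
    #[derive(Clone, Debug, PartialEq)]
    enum LocalValue {
        Uninit,
        Scalar(i64),
    }

    struct Frame {
        locals: Vec<LocalValue>,
    }

    impl Frame {
        fn new(num_locals: usize) -> Self {
            // Mirror of "mark everything initially live": live, but uninitialized.
            Frame { locals: vec![LocalValue::Uninit; num_locals] }
        }

        // Analogue of get_const: an uninitialized local yields no constant.
        fn get_const(&self, local: usize) -> Option<i64> {
            match self.locals.get(local)? {
                LocalValue::Uninit => None,
                LocalValue::Scalar(v) => Some(*v),
            }
        }

        // Analogue of remove_const / StorageDead handling: forget the value, stay live.
        fn make_live_uninit(&mut self, local: usize) {
            self.locals[local] = LocalValue::Uninit;
        }
    }

    fn main() {
        let mut frame = Frame::new(2);
        assert_eq!(frame.get_const(0), None);
        frame.locals[0] = LocalValue::Scalar(42);
        assert_eq!(frame.get_const(0), Some(42));
        frame.make_live_uninit(0);
        assert_eq!(frame.get_const(0), None);
    }
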
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index 9a3798eea..9c38a6f81 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -131,7 +131,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
let observes_address = match ctxt {
PlaceContext::NonMutatingUse(
NonMutatingUseContext::SharedBorrow
- | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::FakeBorrow
| NonMutatingUseContext::AddressOf,
) => true,
// For debuginfo, merging locals is ok.
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 3d442e5dc..d56d4ad4f 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -1,10 +1,8 @@
use super::Error;
-use super::debug;
use super::graph;
use super::spans;
-use debug::{DebugCounters, NESTED_INDENT};
use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
use spans::CoverageSpan;
@@ -16,6 +14,8 @@ use rustc_middle::mir::coverage::*;
use std::fmt::{self, Debug};
+const NESTED_INDENT: &str = " ";
+
/// The coverage counter or counter expression associated with a particular
/// BCB node or BCB edge.
#[derive(Clone)]
@@ -75,8 +75,6 @@ pub(super) struct CoverageCounters {
/// BCB/edge, but are needed as operands to more complex expressions.
/// These are always [`BcbCounter::Expression`].
pub(super) intermediate_expressions: Vec<BcbCounter>,
-
- pub debug_counters: DebugCounters,
}
impl CoverageCounters {
@@ -91,17 +89,9 @@ impl CoverageCounters {
bcb_edge_counters: FxHashMap::default(),
bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
intermediate_expressions: Vec::new(),
-
- debug_counters: DebugCounters::new(),
}
}
- /// Activate the `DebugCounters` data structures, to provide additional debug formatting
- /// features when formatting [`BcbCounter`] (counter) values.
- pub fn enable_debug(&mut self) {
- self.debug_counters.enable();
- }
-
/// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
/// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
/// representing intermediate values.
@@ -113,44 +103,18 @@ impl CoverageCounters {
MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
}
- fn make_counter<F>(&mut self, debug_block_label_fn: F) -> BcbCounter
- where
- F: Fn() -> Option<String>,
- {
- let counter = BcbCounter::Counter { id: self.next_counter() };
- if self.debug_counters.is_enabled() {
- self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
- }
- counter
+ fn make_counter(&mut self) -> BcbCounter {
+ let id = self.next_counter();
+ BcbCounter::Counter { id }
}
- fn make_expression<F>(
- &mut self,
- lhs: Operand,
- op: Op,
- rhs: Operand,
- debug_block_label_fn: F,
- ) -> BcbCounter
- where
- F: Fn() -> Option<String>,
- {
+ fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
let id = self.next_expression();
- let expression = BcbCounter::Expression { id, lhs, op, rhs };
- if self.debug_counters.is_enabled() {
- self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
- }
- expression
+ BcbCounter::Expression { id, lhs, op, rhs }
}
pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
- let some_debug_block_label = if self.debug_counters.is_enabled() {
- self.debug_counters.some_block_label(counter_operand).cloned()
- } else {
- None
- };
- self.make_expression(counter_operand, Op::Add, Operand::Zero, || {
- some_debug_block_label.clone()
- })
+ self.make_expression(counter_operand, Op::Add, Operand::Zero)
}
/// Counter IDs start from one and go up.
@@ -367,12 +331,8 @@ impl<'a> MakeBcbCounters<'a> {
branch_counter_operand,
Op::Add,
sumup_counter_operand,
- || None,
- );
- debug!(
- " [new intermediate expression: {}]",
- self.format_counter(&intermediate_expression)
);
+ debug!(" [new intermediate expression: {:?}]", intermediate_expression);
let intermediate_expression_operand = intermediate_expression.as_operand();
self.coverage_counters.intermediate_expressions.push(intermediate_expression);
some_sumup_counter_operand.replace(intermediate_expression_operand);
@@ -394,9 +354,8 @@ impl<'a> MakeBcbCounters<'a> {
branching_counter_operand,
Op::Subtract,
sumup_counter_operand,
- || Some(format!("{expression_branch:?}")),
);
- debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
+ debug!("{:?} gets an expression: {:?}", expression_branch, expression);
let bcb = expression_branch.target_bcb;
if expression_branch.is_only_path_to_target() {
self.coverage_counters.set_bcb_counter(bcb, expression)?;
@@ -418,10 +377,10 @@ impl<'a> MakeBcbCounters<'a> {
// If the BCB already has a counter, return it.
if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
debug!(
- "{}{:?} already has a counter: {}",
+ "{}{:?} already has a counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
- self.format_counter(counter_kind),
+ counter_kind,
);
return Ok(counter_kind.as_operand());
}
@@ -431,22 +390,22 @@ impl<'a> MakeBcbCounters<'a> {
// program results in a tight infinite loop, but it should still compile.
let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
- let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{bcb:?}")));
+ let counter_kind = self.coverage_counters.make_counter();
if one_path_to_target {
debug!(
- "{}{:?} gets a new counter: {}",
+ "{}{:?} gets a new counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
- self.format_counter(&counter_kind),
+ counter_kind,
);
} else {
debug!(
"{}{:?} has itself as its own predecessor. It can't be part of its own \
- Expression sum, so it will get its own new counter: {}. (Note, the compiled \
+ Expression sum, so it will get its own new counter: {:?}. (Note, the compiled \
code will generate an infinite loop.)",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
- self.format_counter(&counter_kind),
+ counter_kind,
);
}
return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
@@ -481,12 +440,11 @@ impl<'a> MakeBcbCounters<'a> {
sumup_edge_counter_operand,
Op::Add,
edge_counter_operand,
- || None,
);
debug!(
- "{}new intermediate expression: {}",
+ "{}new intermediate expression: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
- self.format_counter(&intermediate_expression)
+ intermediate_expression
);
let intermediate_expression_operand = intermediate_expression.as_operand();
self.coverage_counters.intermediate_expressions.push(intermediate_expression);
@@ -497,13 +455,12 @@ impl<'a> MakeBcbCounters<'a> {
first_edge_counter_operand,
Op::Add,
some_sumup_edge_counter_operand.unwrap(),
- || Some(format!("{bcb:?}")),
);
debug!(
- "{}{:?} gets a new counter (sum of predecessor counters): {}",
+ "{}{:?} gets a new counter (sum of predecessor counters): {:?}",
NESTED_INDENT.repeat(debug_indent_level),
bcb,
- self.format_counter(&counter_kind)
+ counter_kind
);
self.coverage_counters.set_bcb_counter(bcb, counter_kind)
}
@@ -534,24 +491,23 @@ impl<'a> MakeBcbCounters<'a> {
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
{
debug!(
- "{}Edge {:?}->{:?} already has a counter: {}",
+ "{}Edge {:?}->{:?} already has a counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
from_bcb,
to_bcb,
- self.format_counter(counter_kind)
+ counter_kind
);
return Ok(counter_kind.as_operand());
}
// Make a new counter to count this edge.
- let counter_kind =
- self.coverage_counters.make_counter(|| Some(format!("{from_bcb:?}->{to_bcb:?}")));
+ let counter_kind = self.coverage_counters.make_counter();
debug!(
- "{}Edge {:?}->{:?} gets a new counter: {}",
+ "{}Edge {:?}->{:?} gets a new counter: {:?}",
NESTED_INDENT.repeat(debug_indent_level),
from_bcb,
to_bcb,
- self.format_counter(&counter_kind)
+ counter_kind
);
self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
}
@@ -710,9 +666,4 @@ impl<'a> MakeBcbCounters<'a> {
fn bcb_dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool {
self.basic_coverage_blocks.dominates(dom, node)
}
-
- #[inline]
- fn format_counter(&self, counter_kind: &BcbCounter) -> String {
- self.coverage_counters.debug_counters.format_counter(counter_kind)
- }
}
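
With the DebugCounters side table gone, counters and expressions are constructed without label closures and logged through their ordinary {:?} formatting. A simplified standalone sketch of the streamlined factory, with hypothetical IDs and types rather than the real rustc_middle ones:

    // Counters and expressions are created with plain incrementing IDs; the
    // derived Debug impl replaces the old format_counter() helper in logs.
    #[derive(Debug, Clone, Copy)]
    enum Op { Add, Subtract }

    #[derive(Debug, Clone, Copy)]
    enum Operand { Zero, Counter(u32), Expression(u32) }

    #[derive(Debug)]
    enum BcbCounter {
        Counter { id: u32 },
        Expression { id: u32, lhs: Operand, op: Op, rhs: Operand },
    }

    #[derive(Default)]
    struct CoverageCounters {
        next_counter_id: u32,
        next_expression_id: u32,
    }

    impl CoverageCounters {
        fn make_counter(&mut self) -> BcbCounter {
            let id = self.next_counter_id;
            self.next_counter_id += 1;
            BcbCounter::Counter { id }
        }

        fn make_expression(&mut self, lhs: Operand, op: Op, rhs: Operand) -> BcbCounter {
            let id = self.next_expression_id;
            self.next_expression_id += 1;
            BcbCounter::Expression { id, lhs, op, rhs }
        }
    }

    fn main() {
        let mut counters = CoverageCounters::default();
        let c = counters.make_counter();
        let sum = counters.make_expression(Operand::Counter(0), Op::Add, Operand::Zero);
        let diff = counters.make_expression(Operand::Expression(0), Op::Subtract, Operand::Counter(0));
        println!("counter = {:?}, sum = {:?}, diff = {:?}", c, sum, diff);
    }
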
diff --git a/compiler/rustc_mir_transform/src/coverage/debug.rs b/compiler/rustc_mir_transform/src/coverage/debug.rs
deleted file mode 100644
index af616c498..000000000
--- a/compiler/rustc_mir_transform/src/coverage/debug.rs
+++ /dev/null
@@ -1,802 +0,0 @@
-//! The `InstrumentCoverage` MIR pass implementation includes debugging tools and options
-//! to help developers understand and/or improve the analysis and instrumentation of a MIR.
-//!
-//! To enable coverage, include the rustc command line option:
-//!
-//! * `-C instrument-coverage`
-//!
-//! MIR Dump Files, with additional `CoverageGraph` graphviz and `CoverageSpan` spanview
-//! ------------------------------------------------------------------------------------
-//!
-//! Additional debugging options include:
-//!
-//! * `-Z dump-mir=InstrumentCoverage` - Generate `.mir` files showing the state of the MIR,
-//! before and after the `InstrumentCoverage` pass, for each compiled function.
-//!
-//! * `-Z dump-mir-graphviz` - If `-Z dump-mir` is also enabled for the current MIR node path,
-//! each MIR dump is accompanied by a before-and-after graphical view of the MIR, in Graphviz
-//! `.dot` file format (which can be visually rendered as a graph using any of a number of free
-//! Graphviz viewers and IDE extensions).
-//!
-//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
-//! Graphviz `.dot` file for each function, rendering the `CoverageGraph`: the control flow
-//! graph (CFG) of `BasicCoverageBlocks` (BCBs), as nodes, internally labeled to show the
-//! `CoverageSpan`-based MIR elements each BCB represents (`BasicBlock`s, `Statement`s and
-//! `Terminator`s), assigned coverage counters and/or expressions, and edge counters, as needed.
-//!
-//! (Note the additional option, `-Z graphviz-dark-mode`, can be added, to change the rendered
-//! output from its default black-on-white background to a dark color theme, if desired.)
-//!
-//! * `-Z dump-mir-spanview` - If `-Z dump-mir` is also enabled for the current MIR node path,
-//! each MIR dump is accompanied by a before-and-after `.html` document showing the function's
-//! original source code, highlighted by it's MIR spans, at the `statement`-level (by default),
-//! `terminator` only, or encompassing span for the `Terminator` plus all `Statement`s, in each
-//! `block` (`BasicBlock`).
-//!
-//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
-//! spanview `.html` file for each function, showing the aggregated `CoverageSpan`s that will
-//! require counters (or counter expressions) for accurate coverage analysis.
-//!
-//! Debug Logging
-//! -------------
-//!
-//! The `InstrumentCoverage` pass includes debug logging messages at various phases and decision
-//! points, which can be enabled via environment variable:
-//!
-//! ```shell
-//! RUSTC_LOG=rustc_mir_transform::transform::coverage=debug
-//! ```
-//!
-//! Other module paths with coverage-related debug logs may also be of interest, particularly for
-//! debugging the coverage map data, injected as global variables in the LLVM IR (during rustc's
-//! code generation pass). For example:
-//!
-//! ```shell
-//! RUSTC_LOG=rustc_mir_transform::transform::coverage,rustc_codegen_ssa::coverageinfo,rustc_codegen_llvm::coverageinfo=debug
-//! ```
-//!
-//! Coverage Debug Options
-//! ---------------------------------
-//!
-//! Additional debugging options can be enabled using the environment variable:
-//!
-//! ```shell
-//! RUSTC_COVERAGE_DEBUG_OPTIONS=<options>
-//! ```
-//!
-//! These options are comma-separated, and specified in the format `option-name=value`. For example:
-//!
-//! ```shell
-//! $ RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id+operation,allow-unused-expressions=yes cargo build
-//! ```
-//!
-//! Coverage debug options include:
-//!
-//! * `allow-unused-expressions=yes` or `no` (default: `no`)
-//!
-//! The `InstrumentCoverage` algorithms _should_ only create and assign expressions to a
-//! `BasicCoverageBlock`, or an incoming edge, if that expression is either (a) required to
-//! count a `CoverageSpan`, or (b) a dependency of some other required counter expression.
-//!
-//! If an expression is generated that does not map to a `CoverageSpan` or dependency, this
-//! probably indicates there was a bug in the algorithm that creates and assigns counters
-//! and expressions.
-//!
-//! When this kind of bug is encountered, the rustc compiler will panic by default. Setting:
-//! `allow-unused-expressions=yes` will log a warning message instead of panicking (effectively
-//! ignoring the unused expressions), which may be helpful when debugging the root cause of
-//! the problem.
-//!
-//! * `counter-format=<choices>`, where `<choices>` can be any plus-separated combination of `id`,
-//! `block`, and/or `operation` (default: `block+operation`)
-//!
-//! This option effects both the `CoverageGraph` (graphviz `.dot` files) and debug logging, when
-//! generating labels for counters and expressions.
-//!
-//! Depending on the values and combinations, counters can be labeled by:
-//!
-//! * `id` - counter or expression ID (ascending counter IDs, starting at 1, or descending
-//! expression IDs, starting at `u32:MAX`)
-//! * `block` - the `BasicCoverageBlock` label (for example, `bcb0`) or edge label (for
-//! example `bcb0->bcb1`), for counters or expressions assigned to count a
-//! `BasicCoverageBlock` or edge. Intermediate expressions (not directly associated with
-//! a BCB or edge) will be labeled by their expression ID, unless `operation` is also
-//! specified.
-//! * `operation` - applied to expressions only, labels include the left-hand-side counter
-//! or expression label (lhs operand), the operator (`+` or `-`), and the right-hand-side
-//! counter or expression (rhs operand). Expression operand labels are generated
-//! recursively, generating labels with nested operations, enclosed in parentheses
-//! (for example: `bcb2 + (bcb0 - bcb1)`).
-
-use super::counters::{BcbCounter, CoverageCounters};
-use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
-use super::spans::CoverageSpan;
-
-use itertools::Itertools;
-use rustc_middle::mir::create_dump_file;
-use rustc_middle::mir::generic_graphviz::GraphvizWriter;
-use rustc_middle::mir::spanview::{self, SpanViewable};
-
-use rustc_data_structures::fx::FxHashMap;
-use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::{self, BasicBlock};
-use rustc_middle::ty::TyCtxt;
-use rustc_span::Span;
-
-use std::iter;
-use std::ops::Deref;
-use std::sync::OnceLock;
-
-pub const NESTED_INDENT: &str = " ";
-
-const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS";
-
-pub(super) fn debug_options<'a>() -> &'a DebugOptions {
- static DEBUG_OPTIONS: OnceLock<DebugOptions> = OnceLock::new();
-
- &DEBUG_OPTIONS.get_or_init(DebugOptions::from_env)
-}
-
-/// Parses and maintains coverage-specific debug options captured from the environment variable
-/// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set.
-#[derive(Debug, Clone)]
-pub(super) struct DebugOptions {
- pub allow_unused_expressions: bool,
- counter_format: ExpressionFormat,
-}
-
-impl DebugOptions {
- fn from_env() -> Self {
- let mut allow_unused_expressions = true;
- let mut counter_format = ExpressionFormat::default();
-
- if let Ok(env_debug_options) = std::env::var(RUSTC_COVERAGE_DEBUG_OPTIONS) {
- for setting_str in env_debug_options.replace(' ', "").replace('-', "_").split(',') {
- let (option, value) = match setting_str.split_once('=') {
- None => (setting_str, None),
- Some((k, v)) => (k, Some(v)),
- };
- match option {
- "allow_unused_expressions" => {
- allow_unused_expressions = bool_option_val(option, value);
- debug!(
- "{} env option `allow_unused_expressions` is set to {}",
- RUSTC_COVERAGE_DEBUG_OPTIONS, allow_unused_expressions
- );
- }
- "counter_format" => {
- match value {
- None => {
- bug!(
- "`{}` option in environment variable {} requires one or more \
- plus-separated choices (a non-empty subset of \
- `id+block+operation`)",
- option,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- );
- }
- Some(val) => {
- counter_format = counter_format_option_val(val);
- debug!(
- "{} env option `counter_format` is set to {:?}",
- RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
- );
- }
- };
- }
- _ => bug!(
- "Unsupported setting `{}` in environment variable {}",
- option,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- ),
- };
- }
- }
-
- Self { allow_unused_expressions, counter_format }
- }
-}
-
-fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
- if let Some(val) = some_strval {
- if ["yes", "y", "on", "true"].contains(&val) {
- true
- } else if ["no", "n", "off", "false"].contains(&val) {
- false
- } else {
- bug!(
- "Unsupported value `{}` for option `{}` in environment variable {}",
- option,
- val,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- )
- }
- } else {
- true
- }
-}
-
-fn counter_format_option_val(strval: &str) -> ExpressionFormat {
- let mut counter_format = ExpressionFormat { id: false, block: false, operation: false };
- let components = strval.splitn(3, '+');
- for component in components {
- match component {
- "id" => counter_format.id = true,
- "block" => counter_format.block = true,
- "operation" => counter_format.operation = true,
- _ => bug!(
- "Unsupported counter_format choice `{}` in environment variable {}",
- component,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- ),
- }
- }
- counter_format
-}
-
-#[derive(Debug, Clone)]
-struct ExpressionFormat {
- id: bool,
- block: bool,
- operation: bool,
-}
-
-impl Default for ExpressionFormat {
- fn default() -> Self {
- Self { id: false, block: true, operation: true }
- }
-}
-
-/// If enabled, this struct maintains a map from `BcbCounter` IDs (as `Operand`) to
-/// the `BcbCounter` data and optional label (normally, the counter's associated
-/// `BasicCoverageBlock` format string, if any).
-///
-/// Use `format_counter` to convert one of these `BcbCounter` counters to a debug output string,
-/// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
-/// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
-/// variable.
-///
-/// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
-/// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
-pub(super) struct DebugCounters {
- some_counters: Option<FxHashMap<Operand, DebugCounter>>,
-}
-
-impl DebugCounters {
- pub fn new() -> Self {
- Self { some_counters: None }
- }
-
- pub fn enable(&mut self) {
- debug_assert!(!self.is_enabled());
- self.some_counters.replace(FxHashMap::default());
- }
-
- pub fn is_enabled(&self) -> bool {
- self.some_counters.is_some()
- }
-
- pub fn add_counter(&mut self, counter_kind: &BcbCounter, some_block_label: Option<String>) {
- if let Some(counters) = &mut self.some_counters {
- let id = counter_kind.as_operand();
- counters
- .try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
- .expect("attempt to add the same counter_kind to DebugCounters more than once");
- }
- }
-
- pub fn some_block_label(&self, operand: Operand) -> Option<&String> {
- self.some_counters.as_ref().and_then(|counters| {
- counters.get(&operand).and_then(|debug_counter| debug_counter.some_block_label.as_ref())
- })
- }
-
- pub fn format_counter(&self, counter_kind: &BcbCounter) -> String {
- match *counter_kind {
- BcbCounter::Counter { .. } => {
- format!("Counter({})", self.format_counter_kind(counter_kind))
- }
- BcbCounter::Expression { .. } => {
- format!("Expression({})", self.format_counter_kind(counter_kind))
- }
- }
- }
-
- fn format_counter_kind(&self, counter_kind: &BcbCounter) -> String {
- let counter_format = &debug_options().counter_format;
- if let BcbCounter::Expression { id, lhs, op, rhs } = *counter_kind {
- if counter_format.operation {
- return format!(
- "{}{} {} {}",
- if counter_format.id || self.some_counters.is_none() {
- format!("#{} = ", id.index())
- } else {
- String::new()
- },
- self.format_operand(lhs),
- match op {
- Op::Add => "+",
- Op::Subtract => "-",
- },
- self.format_operand(rhs),
- );
- }
- }
-
- let id = counter_kind.as_operand();
- if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
- let counters = self.some_counters.as_ref().unwrap();
- if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
- counters.get(&id)
- {
- return if counter_format.id {
- format!("{}#{:?}", block_label, id)
- } else {
- block_label.to_string()
- };
- }
- }
- format!("#{:?}", id)
- }
-
- fn format_operand(&self, operand: Operand) -> String {
- if matches!(operand, Operand::Zero) {
- return String::from("0");
- }
- if let Some(counters) = &self.some_counters {
- if let Some(DebugCounter { counter_kind, some_block_label }) = counters.get(&operand) {
- if let BcbCounter::Expression { .. } = counter_kind {
- if let Some(label) = some_block_label && debug_options().counter_format.block {
- return format!(
- "{}:({})",
- label,
- self.format_counter_kind(counter_kind)
- );
- }
- return format!("({})", self.format_counter_kind(counter_kind));
- }
- return self.format_counter_kind(counter_kind);
- }
- }
- format!("#{:?}", operand)
- }
-}
-
-/// A non-public support class to `DebugCounters`.
-#[derive(Debug)]
-struct DebugCounter {
- counter_kind: BcbCounter,
- some_block_label: Option<String>,
-}
-
-impl DebugCounter {
- fn new(counter_kind: BcbCounter, some_block_label: Option<String>) -> Self {
- Self { counter_kind, some_block_label }
- }
-}
-
-/// If enabled, this data structure captures additional debugging information used when generating
-/// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
-pub(super) struct GraphvizData {
- some_bcb_to_coverage_spans_with_counters:
- Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, BcbCounter)>>>,
- some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<BcbCounter>>>,
- some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), BcbCounter>>,
-}
-
-impl GraphvizData {
- pub fn new() -> Self {
- Self {
- some_bcb_to_coverage_spans_with_counters: None,
- some_bcb_to_dependency_counters: None,
- some_edge_to_counter: None,
- }
- }
-
- pub fn enable(&mut self) {
- debug_assert!(!self.is_enabled());
- self.some_bcb_to_coverage_spans_with_counters = Some(FxHashMap::default());
- self.some_bcb_to_dependency_counters = Some(FxHashMap::default());
- self.some_edge_to_counter = Some(FxHashMap::default());
- }
-
- pub fn is_enabled(&self) -> bool {
- self.some_bcb_to_coverage_spans_with_counters.is_some()
- }
-
- pub fn add_bcb_coverage_span_with_counter(
- &mut self,
- bcb: BasicCoverageBlock,
- coverage_span: &CoverageSpan,
- counter_kind: &BcbCounter,
- ) {
- if let Some(bcb_to_coverage_spans_with_counters) =
- self.some_bcb_to_coverage_spans_with_counters.as_mut()
- {
- bcb_to_coverage_spans_with_counters
- .entry(bcb)
- .or_insert_with(Vec::new)
- .push((coverage_span.clone(), counter_kind.clone()));
- }
- }
-
- pub fn get_bcb_coverage_spans_with_counters(
- &self,
- bcb: BasicCoverageBlock,
- ) -> Option<&[(CoverageSpan, BcbCounter)]> {
- if let Some(bcb_to_coverage_spans_with_counters) =
- self.some_bcb_to_coverage_spans_with_counters.as_ref()
- {
- bcb_to_coverage_spans_with_counters.get(&bcb).map(Deref::deref)
- } else {
- None
- }
- }
-
- pub fn add_bcb_dependency_counter(
- &mut self,
- bcb: BasicCoverageBlock,
- counter_kind: &BcbCounter,
- ) {
- if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_mut() {
- bcb_to_dependency_counters
- .entry(bcb)
- .or_insert_with(Vec::new)
- .push(counter_kind.clone());
- }
- }
-
- pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[BcbCounter]> {
- if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_ref() {
- bcb_to_dependency_counters.get(&bcb).map(Deref::deref)
- } else {
- None
- }
- }
-
- pub fn set_edge_counter(
- &mut self,
- from_bcb: BasicCoverageBlock,
- to_bb: BasicBlock,
- counter_kind: &BcbCounter,
- ) {
- if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
- edge_to_counter
- .try_insert((from_bcb, to_bb), counter_kind.clone())
- .expect("invalid attempt to insert more than one edge counter for the same edge");
- }
- }
-
- pub fn get_edge_counter(
- &self,
- from_bcb: BasicCoverageBlock,
- to_bb: BasicBlock,
- ) -> Option<&BcbCounter> {
- if let Some(edge_to_counter) = self.some_edge_to_counter.as_ref() {
- edge_to_counter.get(&(from_bcb, to_bb))
- } else {
- None
- }
- }
-}
-
-/// If enabled, this struct captures additional data used to track whether expressions were used,
-/// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are
-/// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
-/// and/or a `CoverageGraph` graphviz output).
-pub(super) struct UsedExpressions {
- some_used_expression_operands: Option<FxHashMap<Operand, Vec<ExpressionId>>>,
- some_unused_expressions:
- Option<Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
-}
-
-impl UsedExpressions {
- pub fn new() -> Self {
- Self { some_used_expression_operands: None, some_unused_expressions: None }
- }
-
- pub fn enable(&mut self) {
- debug_assert!(!self.is_enabled());
- self.some_used_expression_operands = Some(FxHashMap::default());
- self.some_unused_expressions = Some(Vec::new());
- }
-
- pub fn is_enabled(&self) -> bool {
- self.some_used_expression_operands.is_some()
- }
-
- pub fn add_expression_operands(&mut self, expression: &BcbCounter) {
- if let Some(used_expression_operands) = self.some_used_expression_operands.as_mut() {
- if let BcbCounter::Expression { id, lhs, rhs, .. } = *expression {
- used_expression_operands.entry(lhs).or_insert_with(Vec::new).push(id);
- used_expression_operands.entry(rhs).or_insert_with(Vec::new).push(id);
- }
- }
- }
-
- pub fn expression_is_used(&self, expression: &BcbCounter) -> bool {
- if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
- used_expression_operands.contains_key(&expression.as_operand())
- } else {
- false
- }
- }
-
- pub fn add_unused_expression_if_not_found(
- &mut self,
- expression: &BcbCounter,
- edge_from_bcb: Option<BasicCoverageBlock>,
- target_bcb: BasicCoverageBlock,
- ) {
- if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
- if !used_expression_operands.contains_key(&expression.as_operand()) {
- self.some_unused_expressions.as_mut().unwrap().push((
- expression.clone(),
- edge_from_bcb,
- target_bcb,
- ));
- }
- }
- }
-
- /// Return the list of unused counters (if any) as a tuple with the counter (`BcbCounter`),
- /// optional `from_bcb` (if it was an edge counter), and `target_bcb`.
- pub fn get_unused_expressions(
- &self,
- ) -> Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
- if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
- unused_expressions.clone()
- } else {
- Vec::new()
- }
- }
-
- /// If enabled, validate that every BCB or edge counter not directly associated with a coverage
- /// span is at least indirectly associated (it is a dependency of a BCB counter that _is_
- /// associated with a coverage span).
- pub fn validate(
- &mut self,
- bcb_counters_without_direct_coverage_spans: &[(
- Option<BasicCoverageBlock>,
- BasicCoverageBlock,
- BcbCounter,
- )],
- ) {
- if self.is_enabled() {
- let mut not_validated = bcb_counters_without_direct_coverage_spans
- .iter()
- .map(|(_, _, counter_kind)| counter_kind)
- .collect::<Vec<_>>();
- let mut validating_count = 0;
- while not_validated.len() != validating_count {
- let to_validate = not_validated.split_off(0);
- validating_count = to_validate.len();
- for counter_kind in to_validate {
- if self.expression_is_used(counter_kind) {
- self.add_expression_operands(counter_kind);
- } else {
- not_validated.push(counter_kind);
- }
- }
- }
- }
- }
-
- pub fn alert_on_unused_expressions(&self, debug_counters: &DebugCounters) {
- if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
- for (counter_kind, edge_from_bcb, target_bcb) in unused_expressions {
- let unused_counter_message = if let Some(from_bcb) = edge_from_bcb.as_ref() {
- format!(
- "non-coverage edge counter found without a dependent expression, in \
- {:?}->{:?}; counter={}",
- from_bcb,
- target_bcb,
- debug_counters.format_counter(&counter_kind),
- )
- } else {
- format!(
- "non-coverage counter found without a dependent expression, in {:?}; \
- counter={}",
- target_bcb,
- debug_counters.format_counter(&counter_kind),
- )
- };
-
- if debug_options().allow_unused_expressions {
- debug!("WARNING: {}", unused_counter_message);
- } else {
- bug!("{}", unused_counter_message);
- }
- }
- }
- }
-}
-
-/// Generates the MIR pass `CoverageSpan`-specific spanview dump file.
-pub(super) fn dump_coverage_spanview<'tcx>(
- tcx: TyCtxt<'tcx>,
- mir_body: &mir::Body<'tcx>,
- basic_coverage_blocks: &CoverageGraph,
- pass_name: &str,
- body_span: Span,
- coverage_spans: &[CoverageSpan],
-) {
- let mir_source = mir_body.source;
- let def_id = mir_source.def_id();
-
- let span_viewables = span_viewables(tcx, mir_body, basic_coverage_blocks, &coverage_spans);
- let mut file = create_dump_file(tcx, "html", false, pass_name, &0i32, mir_body)
- .expect("Unexpected error creating MIR spanview HTML file");
- let crate_name = tcx.crate_name(def_id.krate);
- let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
- let title = format!("{crate_name}.{item_name} - Coverage Spans");
- spanview::write_document(tcx, body_span, span_viewables, &title, &mut file)
- .expect("Unexpected IO error dumping coverage spans as HTML");
-}
-
-/// Converts the computed `BasicCoverageBlockData`s into `SpanViewable`s.
-fn span_viewables<'tcx>(
- tcx: TyCtxt<'tcx>,
- mir_body: &mir::Body<'tcx>,
- basic_coverage_blocks: &CoverageGraph,
- coverage_spans: &[CoverageSpan],
-) -> Vec<SpanViewable> {
- let mut span_viewables = Vec::new();
- for coverage_span in coverage_spans {
- let tooltip = coverage_span.format_coverage_statements(tcx, mir_body);
- let CoverageSpan { span, bcb, .. } = coverage_span;
- let bcb_data = &basic_coverage_blocks[*bcb];
- let id = bcb_data.id();
- let leader_bb = bcb_data.leader_bb();
- span_viewables.push(SpanViewable { bb: leader_bb, span: *span, id, tooltip });
- }
- span_viewables
-}
-
-/// Generates the MIR pass coverage-specific graphviz dump file.
-pub(super) fn dump_coverage_graphviz<'tcx>(
- tcx: TyCtxt<'tcx>,
- mir_body: &mir::Body<'tcx>,
- pass_name: &str,
- basic_coverage_blocks: &CoverageGraph,
- coverage_counters: &CoverageCounters,
- graphviz_data: &GraphvizData,
- intermediate_expressions: &[BcbCounter],
- debug_used_expressions: &UsedExpressions,
-) {
- let debug_counters = &coverage_counters.debug_counters;
-
- let mir_source = mir_body.source;
- let def_id = mir_source.def_id();
- let node_content = |bcb| {
- bcb_to_string_sections(
- tcx,
- mir_body,
- coverage_counters,
- bcb,
- &basic_coverage_blocks[bcb],
- graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
- graphviz_data.get_bcb_dependency_counters(bcb),
- // intermediate_expressions are injected into the mir::START_BLOCK, so
- // include them in the first BCB.
- if bcb.index() == 0 { Some(&intermediate_expressions) } else { None },
- )
- };
- let edge_labels = |from_bcb| {
- let from_bcb_data = &basic_coverage_blocks[from_bcb];
- let from_terminator = from_bcb_data.terminator(mir_body);
- let mut edge_labels = from_terminator.kind.fmt_successor_labels();
- edge_labels.retain(|label| label != "unreachable");
- let edge_counters = from_terminator
- .successors()
- .map(|successor_bb| graphviz_data.get_edge_counter(from_bcb, successor_bb));
- iter::zip(&edge_labels, edge_counters)
- .map(|(label, some_counter)| {
- if let Some(counter) = some_counter {
- format!("{}\n{}", label, debug_counters.format_counter(counter))
- } else {
- label.to_string()
- }
- })
- .collect::<Vec<_>>()
- };
- let graphviz_name = format!("Cov_{}_{}", def_id.krate.index(), def_id.index.index());
- let mut graphviz_writer =
- GraphvizWriter::new(basic_coverage_blocks, &graphviz_name, node_content, edge_labels);
- let unused_expressions = debug_used_expressions.get_unused_expressions();
- if unused_expressions.len() > 0 {
- graphviz_writer.set_graph_label(&format!(
- "Unused expressions:\n {}",
- unused_expressions
- .as_slice()
- .iter()
- .map(|(counter_kind, edge_from_bcb, target_bcb)| {
- if let Some(from_bcb) = edge_from_bcb.as_ref() {
- format!(
- "{:?}->{:?}: {}",
- from_bcb,
- target_bcb,
- debug_counters.format_counter(&counter_kind),
- )
- } else {
- format!(
- "{:?}: {}",
- target_bcb,
- debug_counters.format_counter(&counter_kind),
- )
- }
- })
- .join("\n ")
- ));
- }
- let mut file = create_dump_file(tcx, "dot", false, pass_name, &0i32, mir_body)
- .expect("Unexpected error creating BasicCoverageBlock graphviz DOT file");
- graphviz_writer
- .write_graphviz(tcx, &mut file)
- .expect("Unexpected error writing BasicCoverageBlock graphviz DOT file");
-}
-
-fn bcb_to_string_sections<'tcx>(
- tcx: TyCtxt<'tcx>,
- mir_body: &mir::Body<'tcx>,
- coverage_counters: &CoverageCounters,
- bcb: BasicCoverageBlock,
- bcb_data: &BasicCoverageBlockData,
- some_coverage_spans_with_counters: Option<&[(CoverageSpan, BcbCounter)]>,
- some_dependency_counters: Option<&[BcbCounter]>,
- some_intermediate_expressions: Option<&[BcbCounter]>,
-) -> Vec<String> {
- let debug_counters = &coverage_counters.debug_counters;
-
- let len = bcb_data.basic_blocks.len();
- let mut sections = Vec::new();
- if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
- sections.push(
- collect_intermediate_expressions
- .iter()
- .map(|expression| {
- format!("Intermediate {}", debug_counters.format_counter(expression))
- })
- .join("\n"),
- );
- }
- if let Some(coverage_spans_with_counters) = some_coverage_spans_with_counters {
- sections.push(
- coverage_spans_with_counters
- .iter()
- .map(|(covspan, counter)| {
- format!(
- "{} at {}",
- debug_counters.format_counter(counter),
- covspan.format(tcx, mir_body)
- )
- })
- .join("\n"),
- );
- }
- if let Some(dependency_counters) = some_dependency_counters {
- sections.push(format!(
- "Non-coverage counters:\n {}",
- dependency_counters
- .iter()
- .map(|counter| debug_counters.format_counter(counter))
- .join(" \n"),
- ));
- }
- if let Some(counter_kind) = coverage_counters.bcb_counter(bcb) {
- sections.push(format!("{counter_kind:?}"));
- }
- let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
- .iter()
- .map(|&bb| format!("{:?}: {}", bb, mir_body[bb].terminator().kind.name()))
- .collect::<Vec<_>>();
- if non_term_blocks.len() > 0 {
- sections.push(non_term_blocks.join("\n"));
- }
- sections.push(format!(
- "{:?}: {}",
- bcb_data.basic_blocks.last().unwrap(),
- bcb_data.terminator(mir_body).kind.name(),
- ));
- sections
-}
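
The deleted module's main job was parsing RUSTC_COVERAGE_DEBUG_OPTIONS as comma-separated `option=value` settings. A standalone sketch of that parsing pattern, reduced to plain std types with the bug!/debug! handling omitted:

    // Comma-separated `key=value` settings read from an environment variable,
    // with bare keys treated as boolean "on" (simplified names, not rustc's).
    use std::collections::HashMap;
    use std::env;

    fn parse_debug_options(var: &str) -> HashMap<String, String> {
        let mut options = HashMap::new();
        if let Ok(raw) = env::var(var) {
            for setting in raw.replace(' ', "").replace('-', "_").split(',') {
                if setting.is_empty() {
                    continue;
                }
                let (key, value) = match setting.split_once('=') {
                    Some((k, v)) => (k, v),
                    None => (setting, "yes"),
                };
                options.insert(key.to_string(), value.to_string());
            }
        }
        options
    }

    fn main() {
        // e.g. RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id+operation,allow-unused-expressions=yes
        let options = parse_debug_options("RUSTC_COVERAGE_DEBUG_OPTIONS");
        println!("{:?}", options);
    }
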
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 59b01ffec..ff2254d69 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,4 +1,3 @@
-use itertools::Itertools;
use rustc_data_structures::graph::dominators::{self, Dominators};
use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
use rustc_index::bit_set::BitSet;
@@ -8,8 +7,6 @@ use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, Terminator
use std::cmp::Ordering;
use std::ops::{Index, IndexMut};
-const ID_SEPARATOR: &str = ",";
-
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
#[derive(Debug)]
@@ -116,7 +113,7 @@ impl CoverageGraph {
match term.kind {
TerminatorKind::Return { .. }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Yield { .. }
| TerminatorKind::SwitchInt { .. } => {
// The `bb` has more than one _outgoing_ edge, or exits the function. Save the
@@ -146,7 +143,7 @@ impl CoverageGraph {
// is as intended. (See Issue #78544 for a possible future option to support
// coverage in test programs that panic.)
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::Call { .. }
@@ -199,12 +196,8 @@ impl CoverageGraph {
}
#[inline(always)]
- pub fn rank_partial_cmp(
- &self,
- a: BasicCoverageBlock,
- b: BasicCoverageBlock,
- ) -> Option<Ordering> {
- self.dominators.as_ref().unwrap().rank_partial_cmp(a, b)
+ pub fn cmp_in_dominator_order(&self, a: BasicCoverageBlock, b: BasicCoverageBlock) -> Ordering {
+ self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b)
}
}
@@ -328,10 +321,6 @@ impl BasicCoverageBlockData {
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
-
- pub fn id(&self) -> String {
- format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR))
- }
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
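
cmp_in_dominator_order returns a total Ordering rather than the Option<Ordering> of the old rank_partial_cmp, so callers can sort nodes directly. An illustrative sketch with a made-up rank table standing in for the real dominator information:

    // Hypothetical graph with a precomputed per-node rank; sorting needs no unwrapping.
    use std::cmp::Ordering;

    struct Graph {
        rank: Vec<usize>,
    }

    impl Graph {
        fn cmp_in_dominator_order(&self, a: usize, b: usize) -> Ordering {
            self.rank[a].cmp(&self.rank[b])
        }
    }

    fn main() {
        let graph = Graph { rank: vec![2, 0, 1] };
        let mut nodes = vec![0, 1, 2];
        nodes.sort_by(|&a, &b| graph.cmp_in_dominator_order(a, b));
        assert_eq!(nodes, vec![1, 2, 0]);
    }
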
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 8c9eae508..c75d33eeb 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -1,7 +1,6 @@
pub mod query;
mod counters;
-mod debug;
mod graph;
mod spans;
@@ -20,7 +19,6 @@ use rustc_index::IndexVec;
use rustc_middle::hir;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::dump_enabled;
use rustc_middle::mir::{
self, BasicBlock, BasicBlockData, Coverage, SourceInfo, Statement, StatementKind, Terminator,
TerminatorKind,
@@ -28,7 +26,7 @@ use rustc_middle::mir::{
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;
use rustc_span::source_map::SourceMap;
-use rustc_span::{CharPos, ExpnKind, Pos, SourceFile, Span, Symbol};
+use rustc_span::{ExpnKind, SourceFile, Span, Symbol};
/// A simple error message wrapper for `coverage::Error`s.
#[derive(Debug)]
@@ -94,13 +92,12 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
}
trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
- Instrumentor::new(&self.name(), tcx, mir_body).inject_counters();
+ Instrumentor::new(tcx, mir_body).inject_counters();
trace!("InstrumentCoverage done for {:?}", mir_source.def_id());
}
}
struct Instrumentor<'a, 'tcx> {
- pass_name: &'a str,
tcx: TyCtxt<'tcx>,
mir_body: &'a mut mir::Body<'tcx>,
source_file: Lrc<SourceFile>,
@@ -112,7 +109,7 @@ struct Instrumentor<'a, 'tcx> {
}
impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
- fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
+ fn new(tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
let source_map = tcx.sess.source_map();
let def_id = mir_body.source.def_id();
let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
@@ -141,7 +138,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let coverage_counters = CoverageCounters::new(&basic_coverage_blocks);
Self {
- pass_name,
tcx,
mir_body,
source_file,
@@ -154,28 +150,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
}
fn inject_counters(&'a mut self) {
- let tcx = self.tcx;
- let mir_source = self.mir_body.source;
- let def_id = mir_source.def_id();
let fn_sig_span = self.fn_sig_span;
let body_span = self.body_span;
- let mut graphviz_data = debug::GraphvizData::new();
- let mut debug_used_expressions = debug::UsedExpressions::new();
-
- let dump_mir = dump_enabled(tcx, self.pass_name, def_id);
- let dump_graphviz = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_graphviz;
- let dump_spanview = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_spanview.is_some();
-
- if dump_graphviz {
- graphviz_data.enable();
- self.coverage_counters.enable_debug();
- }
-
- if dump_graphviz || level_enabled!(tracing::Level::DEBUG) {
- debug_used_expressions.enable();
- }
-
////////////////////////////////////////////////////
// Compute `CoverageSpan`s from the `CoverageGraph`.
let coverage_spans = CoverageSpans::generate_coverage_spans(
@@ -185,17 +162,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
&self.basic_coverage_blocks,
);
- if dump_spanview {
- debug::dump_coverage_spanview(
- tcx,
- self.mir_body,
- &self.basic_coverage_blocks,
- self.pass_name,
- body_span,
- &coverage_spans,
- );
- }
-
////////////////////////////////////////////////////
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
// every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
@@ -209,14 +175,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
.make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
if let Ok(()) = result {
- // If debugging, add any intermediate expressions (which are not associated with any
- // BCB) to the `debug_used_expressions` map.
- if debug_used_expressions.is_enabled() {
- for intermediate_expression in &self.coverage_counters.intermediate_expressions {
- debug_used_expressions.add_expression_operands(intermediate_expression);
- }
- }
-
////////////////////////////////////////////////////
// Remove the counter or edge counter from of each `CoverageSpan`s associated
// `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
@@ -227,11 +185,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
// These `CoverageSpan`-associated counters are removed from their associated
// `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
// are indirect counters (to be injected next, without associated code regions).
- self.inject_coverage_span_counters(
- coverage_spans,
- &mut graphviz_data,
- &mut debug_used_expressions,
- );
+ self.inject_coverage_span_counters(coverage_spans);
////////////////////////////////////////////////////
// For any remaining `BasicCoverageBlock` counters (that were not associated with
@@ -239,37 +193,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
// to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
// are in fact counted, even though they don't directly contribute to counting
// their own independent code region's coverage.
- self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
+ self.inject_indirect_counters();
// Intermediate expressions will be injected as the final step, after generating
// debug output, if any.
////////////////////////////////////////////////////
};
- if graphviz_data.is_enabled() {
- // Even if there was an error, a partial CoverageGraph can still generate a useful
- // graphviz output.
- debug::dump_coverage_graphviz(
- tcx,
- self.mir_body,
- self.pass_name,
- &self.basic_coverage_blocks,
- &self.coverage_counters,
- &graphviz_data,
- &self.coverage_counters.intermediate_expressions,
- &debug_used_expressions,
- );
- }
-
if let Err(e) = result {
bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
};
- // Depending on current `debug_options()`, `alert_on_unused_expressions()` could panic, so
- // this check is performed as late as possible, to allow other debug output (logs and dump
- // files), which might be helpful in analyzing unused expressions, to still be generated.
- debug_used_expressions.alert_on_unused_expressions(&self.coverage_counters.debug_counters);
-
////////////////////////////////////////////////////
// Finally, inject the intermediate expressions collected along the way.
for intermediate_expression in &self.coverage_counters.intermediate_expressions {
@@ -285,15 +219,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
/// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
/// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
/// the BCB `Counter` value.
- ///
- /// If debugging, add every BCB `Expression` associated with a `CoverageSpan`s to the
- /// `used_expression_operands` map.
- fn inject_coverage_span_counters(
- &mut self,
- coverage_spans: Vec<CoverageSpan>,
- graphviz_data: &mut debug::GraphvizData,
- debug_used_expressions: &mut debug::UsedExpressions,
- ) {
+ fn inject_coverage_span_counters(&mut self, coverage_spans: Vec<CoverageSpan>) {
let tcx = self.tcx;
let source_map = tcx.sess.source_map();
let body_span = self.body_span;
@@ -307,15 +233,12 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
self.coverage_counters.make_identity_counter(counter_operand)
} else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) {
bcb_counters[bcb] = Some(counter_kind.as_operand());
- debug_used_expressions.add_expression_operands(&counter_kind);
counter_kind
} else {
bug!("Every BasicCoverageBlock should have a Counter or Expression");
};
- graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
- let code_region =
- make_code_region(source_map, file_name, &self.source_file, span, body_span);
+ let code_region = make_code_region(source_map, file_name, span, body_span);
inject_statement(
self.mir_body,
@@ -334,11 +257,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
/// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
/// dependency (one of the expression operands). Collect them, and inject the additional
/// counters into the MIR, without a reportable coverage span.
- fn inject_indirect_counters(
- &mut self,
- graphviz_data: &mut debug::GraphvizData,
- debug_used_expressions: &mut debug::UsedExpressions,
- ) {
+ fn inject_indirect_counters(&mut self) {
let mut bcb_counters_without_direct_coverage_spans = Vec::new();
for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() {
bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
@@ -353,19 +272,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
));
}
- // If debug is enabled, validate that every BCB or edge counter not directly associated
- // with a coverage span is at least indirectly associated (it is a dependency of a BCB
- // counter that _is_ associated with a coverage span).
- debug_used_expressions.validate(&bcb_counters_without_direct_coverage_spans);
-
for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
{
- debug_used_expressions.add_unused_expression_if_not_found(
- &counter_kind,
- edge_from_bcb,
- target_bcb,
- );
-
match counter_kind {
BcbCounter::Counter { .. } => {
let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
@@ -376,26 +284,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let to_bb = self.bcb_leader_bb(target_bcb);
let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
- graphviz_data.set_edge_counter(from_bcb, new_bb, &counter_kind);
debug!(
"Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
- BasicBlock {:?}, for unclaimed edge counter {}",
- edge_from_bcb,
- from_bb,
- target_bcb,
- to_bb,
- new_bb,
- self.format_counter(&counter_kind),
+ BasicBlock {:?}, for unclaimed edge counter {:?}",
+ edge_from_bcb, from_bb, target_bcb, to_bb, new_bb, counter_kind,
);
new_bb
} else {
let target_bb = self.bcb_last_bb(target_bcb);
- graphviz_data.add_bcb_dependency_counter(target_bcb, &counter_kind);
debug!(
- "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {}",
- target_bcb,
- target_bb,
- self.format_counter(&counter_kind),
+ "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {:?}",
+ target_bcb, target_bb, counter_kind,
);
target_bb
};
@@ -430,11 +329,6 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
&self.basic_coverage_blocks[bcb]
}
- #[inline]
- fn format_counter(&self, counter_kind: &BcbCounter) -> String {
- self.coverage_counters.debug_counters.format_counter(counter_kind)
- }
-
fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind {
match *counter_kind {
BcbCounter::Counter { id } => {
@@ -510,40 +404,36 @@ fn inject_intermediate_expression(mir_body: &mut mir::Body<'_>, expression: Cove
fn make_code_region(
source_map: &SourceMap,
file_name: Symbol,
- source_file: &Lrc<SourceFile>,
span: Span,
body_span: Span,
) -> CodeRegion {
debug!(
- "Called make_code_region(file_name={}, source_file={:?}, span={}, body_span={})",
+ "Called make_code_region(file_name={}, span={}, body_span={})",
file_name,
- source_file,
source_map.span_to_diagnostic_string(span),
source_map.span_to_diagnostic_string(body_span)
);
- let (start_line, mut start_col) = source_file.lookup_file_pos(span.lo());
- let (end_line, end_col) = if span.hi() == span.lo() {
- let (end_line, mut end_col) = (start_line, start_col);
+ let (file, mut start_line, mut start_col, mut end_line, mut end_col) =
+ source_map.span_to_location_info(span);
+ if span.hi() == span.lo() {
// Extend an empty span by one character so the region will be counted.
- let CharPos(char_pos) = start_col;
if span.hi() == body_span.hi() {
- start_col = CharPos(char_pos.saturating_sub(1));
+ start_col = start_col.saturating_sub(1);
} else {
- end_col = CharPos(char_pos + 1);
+ end_col = start_col + 1;
}
- (end_line, end_col)
- } else {
- source_file.lookup_file_pos(span.hi())
};
- let start_line = source_map.doctest_offset_line(&source_file.name, start_line);
- let end_line = source_map.doctest_offset_line(&source_file.name, end_line);
+ if let Some(file) = file {
+ start_line = source_map.doctest_offset_line(&file.name, start_line);
+ end_line = source_map.doctest_offset_line(&file.name, end_line);
+ }
CodeRegion {
file_name,
start_line: start_line as u32,
- start_col: start_col.to_u32() + 1,
+ start_col: start_col as u32,
end_line: end_line as u32,
- end_col: end_col.to_u32() + 1,
+ end_col: end_col as u32,
}
}
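
make_code_region now takes line/column info from span_to_location_info and still widens zero-width spans by one column so the region is counted, extending backwards when the span sits at the very end of the body. A standalone sketch of just that adjustment, assuming the 1-based columns the new code works with:

    // Widen an empty (zero-width) span by one column; non-empty spans are untouched.
    fn widen_empty_region(
        mut start_col: usize,
        mut end_col: usize,
        span_is_empty: bool,
        span_at_body_end: bool,
    ) -> (usize, usize) {
        if span_is_empty {
            if span_at_body_end {
                // At the end of the body there is nothing to the right, so back up instead.
                start_col = start_col.saturating_sub(1);
            } else {
                end_col = start_col + 1;
            }
        }
        (start_col, end_col)
    }

    fn main() {
        // Empty span in the middle of the body: extend forwards.
        assert_eq!(widen_empty_region(5, 5, true, false), (5, 6));
        // Empty span at the end of the body: extend backwards.
        assert_eq!(widen_empty_region(5, 5, true, true), (4, 5));
        // Non-empty spans are left alone.
        assert_eq!(widen_empty_region(3, 9, false, false), (3, 9));
    }
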
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index aa205655f..56365c5d4 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -1,5 +1,6 @@
use super::*;
+use rustc_data_structures::captures::Captures;
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::{self, Body, Coverage, CoverageInfo};
use rustc_middle::query::Providers;
@@ -12,15 +13,10 @@ pub(crate) fn provide(providers: &mut Providers) {
providers.covered_code_regions = |tcx, def_id| covered_code_regions(tcx, def_id);
}
-/// The `num_counters` argument to `llvm.instrprof.increment` is the max counter_id + 1, or in
-/// other words, the number of counter value references injected into the MIR (plus 1 for the
-/// reserved `ZERO` counter, which uses counter ID `0` when included in an expression). Injected
-/// counters have a counter ID from `1..num_counters-1`.
-///
-/// `num_expressions` is the number of counter expressions added to the MIR body.
-///
-/// Both `num_counters` and `num_expressions` are used to initialize new vectors, during backend
-/// code generate, to lookup counters and expressions by simple u32 indexes.
+/// Coverage codegen needs to know the total number of counter IDs and expression IDs that have
+/// been used by a function's coverage mappings. These totals are used to create vectors to hold
+/// the relevant counter and expression data, and the maximum counter ID (+ 1) is also needed by
+/// the `llvm.instrprof.increment` intrinsic.
///
/// MIR optimization may split and duplicate some BasicBlock sequences, or optimize out some code
/// including injected counters. (It is OK if some counters are optimized out, but those counters
@@ -28,71 +24,51 @@ pub(crate) fn provide(providers: &mut Providers) {
/// calls may not work; but computing the number of counters or expressions by adding `1` to the
/// highest ID (for a given instrumented function) is valid.
///
-/// This visitor runs twice, first with `add_missing_operands` set to `false`, to find the maximum
-/// counter ID and maximum expression ID based on their enum variant `id` fields; then, as a
-/// safeguard, with `add_missing_operands` set to `true`, to find any other counter or expression
-/// IDs referenced by expression operands, if not already seen.
-///
-/// Ideally, each operand ID in a MIR `CoverageKind::Expression` will have a separate MIR `Coverage`
-/// statement for the `Counter` or `Expression` with the referenced ID. but since current or future
-/// MIR optimizations can theoretically optimize out segments of a MIR, it may not be possible to
-/// guarantee this, so the second pass ensures the `CoverageInfo` counts include all referenced IDs.
+/// It's possible for a coverage expression to remain in MIR while one or both of its operands
+/// have been optimized away. To avoid problems in codegen, we include those operands' IDs when
+/// determining the maximum counter/expression ID, even if the underlying counter/expression is
+/// no longer present.
struct CoverageVisitor {
- info: CoverageInfo,
- add_missing_operands: bool,
+ max_counter_id: CounterId,
+ max_expression_id: ExpressionId,
}
impl CoverageVisitor {
- /// Updates `num_counters` to the maximum encountered counter ID plus 1.
+ /// Updates `max_counter_id` to the maximum encountered counter ID.
#[inline(always)]
- fn update_num_counters(&mut self, counter_id: CounterId) {
- let counter_id = counter_id.as_u32();
- self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
+ fn update_max_counter_id(&mut self, counter_id: CounterId) {
+ self.max_counter_id = self.max_counter_id.max(counter_id);
}
- /// Updates `num_expressions` to the maximum encountered expression ID plus 1.
+ /// Updates `max_expression_id` to the maximum encountered expression ID.
#[inline(always)]
- fn update_num_expressions(&mut self, expression_id: ExpressionId) {
- let expression_id = expression_id.as_u32();
- self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_id + 1);
+ fn update_max_expression_id(&mut self, expression_id: ExpressionId) {
+ self.max_expression_id = self.max_expression_id.max(expression_id);
}
fn update_from_expression_operand(&mut self, operand: Operand) {
match operand {
- Operand::Counter(id) => self.update_num_counters(id),
- Operand::Expression(id) => self.update_num_expressions(id),
+ Operand::Counter(id) => self.update_max_counter_id(id),
+ Operand::Expression(id) => self.update_max_expression_id(id),
Operand::Zero => {}
}
}
fn visit_body(&mut self, body: &Body<'_>) {
- for bb_data in body.basic_blocks.iter() {
- for statement in bb_data.statements.iter() {
- if let StatementKind::Coverage(box ref coverage) = statement.kind {
- if is_inlined(body, statement) {
- continue;
- }
- self.visit_coverage(coverage);
- }
- }
+ for coverage in all_coverage_in_mir_body(body) {
+ self.visit_coverage(coverage);
}
}
fn visit_coverage(&mut self, coverage: &Coverage) {
- if self.add_missing_operands {
- match coverage.kind {
- CoverageKind::Expression { lhs, rhs, .. } => {
- self.update_from_expression_operand(lhs);
- self.update_from_expression_operand(rhs);
- }
- _ => {}
- }
- } else {
- match coverage.kind {
- CoverageKind::Counter { id, .. } => self.update_num_counters(id),
- CoverageKind::Expression { id, .. } => self.update_num_expressions(id),
- _ => {}
+ match coverage.kind {
+ CoverageKind::Counter { id, .. } => self.update_max_counter_id(id),
+ CoverageKind::Expression { id, lhs, rhs, .. } => {
+ self.update_max_expression_id(id);
+ self.update_from_expression_operand(lhs);
+ self.update_from_expression_operand(rhs);
}
+ CoverageKind::Unreachable => {}
}
}
}
@@ -101,37 +77,40 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) ->
let mir_body = tcx.instance_mir(instance_def);
let mut coverage_visitor = CoverageVisitor {
- info: CoverageInfo { num_counters: 0, num_expressions: 0 },
- add_missing_operands: false,
+ max_counter_id: CounterId::START,
+ max_expression_id: ExpressionId::START,
};
coverage_visitor.visit_body(mir_body);
- coverage_visitor.add_missing_operands = true;
- coverage_visitor.visit_body(mir_body);
-
- coverage_visitor.info
+ // Add 1 to the highest IDs to get the total number of IDs.
+ CoverageInfo {
+ num_counters: (coverage_visitor.max_counter_id + 1).as_u32(),
+ num_expressions: (coverage_visitor.max_expression_id + 1).as_u32(),
+ }
}
fn covered_code_regions(tcx: TyCtxt<'_>, def_id: DefId) -> Vec<&CodeRegion> {
let body = mir_body(tcx, def_id);
- body.basic_blocks
- .iter()
- .flat_map(|data| {
- data.statements.iter().filter_map(|statement| match statement.kind {
- StatementKind::Coverage(box ref coverage) => {
- if is_inlined(body, statement) {
- None
- } else {
- coverage.code_region.as_ref() // may be None
- }
- }
- _ => None,
- })
- })
+ all_coverage_in_mir_body(body)
+ // Not all coverage statements have an attached code region.
+ .filter_map(|coverage| coverage.code_region.as_ref())
.collect()
}
+fn all_coverage_in_mir_body<'a, 'tcx>(
+ body: &'a Body<'tcx>,
+) -> impl Iterator<Item = &'a Coverage> + Captures<'tcx> {
+ body.basic_blocks.iter().flat_map(|bb_data| &bb_data.statements).filter_map(|statement| {
+ match statement.kind {
+ StatementKind::Coverage(box ref coverage) if !is_inlined(body, statement) => {
+ Some(coverage)
+ }
+ _ => None,
+ }
+ })
+}
+
fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
let scope_data = &body.source_scopes[statement.source_info.scope];
scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
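The reworked query above folds over every non-inlined coverage statement exactly once, tracking the highest counter and expression IDs it sees (including IDs that only appear as expression operands), then adds 1 to obtain `num_counters` and `num_expressions`. A rough standalone sketch of that fold, with stand-in enums rather than the real MIR types:

```rust
// Standalone sketch: compute the totals as "highest ID seen + 1", counting
// operand IDs too, so codegen can size its vectors even if an operand's
// defining statement was optimized out.
#[derive(Clone, Copy)]
enum Op {
    Counter(u32),
    Expression(u32),
    Zero,
}

enum Cov {
    Counter { id: u32 },
    Expression { id: u32, lhs: Op, rhs: Op },
    Unreachable,
}

fn coverage_info(statements: &[Cov]) -> (u32, u32) {
    let mut max_counter = 0;
    let mut max_expression = 0;
    let mut note = |op: Op| match op {
        Op::Counter(id) => max_counter = max_counter.max(id),
        Op::Expression(id) => max_expression = max_expression.max(id),
        Op::Zero => {}
    };
    for cov in statements {
        match *cov {
            Cov::Counter { id } => note(Op::Counter(id)),
            Cov::Expression { id, lhs, rhs } => {
                note(Op::Expression(id));
                note(lhs);
                note(rhs);
            }
            Cov::Unreachable => {}
        }
    }
    // Add 1 to the highest IDs to get the totals.
    (max_counter + 1, max_expression + 1)
}

fn main() {
    let stmts = [
        Cov::Counter { id: 0 },
        Cov::Expression { id: 2, lhs: Op::Counter(3), rhs: Op::Zero },
    ];
    // Counter 3 only appears as an operand, yet still raises num_counters to 4.
    assert_eq!(coverage_info(&stmts), (4, 3));
}
```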
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index deebf5345..ed0e104d6 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -1,18 +1,14 @@
use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
-use itertools::Itertools;
use rustc_data_structures::graph::WithNumNodes;
-use rustc_middle::mir::spanview::source_range_no_file;
use rustc_middle::mir::{
self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
TerminatorKind,
};
-use rustc_middle::ty::TyCtxt;
use rustc_span::source_map::original_sp;
use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
use std::cell::OnceCell;
-use std::cmp::Ordering;
#[derive(Debug, Copy, Clone)]
pub(super) enum CoverageStatement {
@@ -21,31 +17,6 @@ pub(super) enum CoverageStatement {
}
impl CoverageStatement {
- pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
- match *self {
- Self::Statement(bb, span, stmt_index) => {
- let stmt = &mir_body[bb].statements[stmt_index];
- format!(
- "{}: @{}[{}]: {:?}",
- source_range_no_file(tcx, span),
- bb.index(),
- stmt_index,
- stmt
- )
- }
- Self::Terminator(bb, span) => {
- let term = mir_body[bb].terminator();
- format!(
- "{}: @{}.{}: {:?}",
- source_range_no_file(tcx, span),
- bb.index(),
- term.kind.name(),
- term.kind
- )
- }
- }
- }
-
pub fn span(&self) -> Span {
match self {
Self::Statement(_, span, _) | Self::Terminator(_, span) => *span,
@@ -151,27 +122,6 @@ impl CoverageSpan {
self.bcb == other.bcb
}
- pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
- format!(
- "{}\n {}",
- source_range_no_file(tcx, self.span),
- self.format_coverage_statements(tcx, mir_body).replace('\n', "\n "),
- )
- }
-
- pub fn format_coverage_statements<'tcx>(
- &self,
- tcx: TyCtxt<'tcx>,
- mir_body: &mir::Body<'tcx>,
- ) -> String {
- let mut sorted_coverage_statements = self.coverage_statements.clone();
- sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt {
- CoverageStatement::Statement(bb, _, index) => (bb, index),
- CoverageStatement::Terminator(bb, _) => (bb, usize::MAX),
- });
- sorted_coverage_statements.iter().map(|covstmt| covstmt.format(tcx, mir_body)).join("\n")
- }
-
/// If the span is part of a macro, returns the macro name symbol.
pub fn current_macro(&self) -> Option<Symbol> {
self.current_macro_or_none
@@ -333,30 +283,21 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
initial_spans.push(CoverageSpan::for_fn_sig(self.fn_sig_span));
- initial_spans.sort_unstable_by(|a, b| {
- if a.span.lo() == b.span.lo() {
- if a.span.hi() == b.span.hi() {
- if a.is_in_same_bcb(b) {
- Some(Ordering::Equal)
- } else {
- // Sort equal spans by dominator relationship (so dominators always come
- // before the dominated equal spans). When later comparing two spans in
- // order, the first will either dominate the second, or they will have no
- // dominator relationship.
- self.basic_coverage_blocks.rank_partial_cmp(a.bcb, b.bcb)
- }
- } else {
- // Sort hi() in reverse order so shorter spans are attempted after longer spans.
- // This guarantees that, if a `prev` span overlaps, and is not equal to, a
- // `curr` span, the prev span either extends further left of the curr span, or
- // they start at the same position and the prev span extends further right of
- // the end of the curr span.
- b.span.hi().partial_cmp(&a.span.hi())
- }
- } else {
- a.span.lo().partial_cmp(&b.span.lo())
- }
- .unwrap()
+ initial_spans.sort_by(|a, b| {
+ // First sort by span start.
+ Ord::cmp(&a.span.lo(), &b.span.lo())
+ // If span starts are the same, sort by span end in reverse order.
+ // This ensures that if spans A and B are adjacent in the list,
+ // and they overlap but are not equal, then either:
+ // - Span A extends further left, or
+ // - Both have the same start and span A extends further right
+ .then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
+ // If both spans are equal, sort the BCBs in dominator order,
+ // so that dominating BCBs come before other BCBs they dominate.
+ .then_with(|| self.basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
+ // If two spans are otherwise identical, put closure spans first,
+ // as this seems to be what the refinement step expects.
+ .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
});
initial_spans
@@ -486,6 +427,12 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
let after_macro_bang =
merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
+ if self.curr().span.lo() + after_macro_bang > self.curr().span.hi() {
+ // Something is wrong with the macro name span;
+ // return now to avoid emitting malformed mappings.
+ // FIXME(#117788): Track down why this happens.
+ return;
+ }
let mut macro_name_cov = self.curr().clone();
self.curr_mut().span =
self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
@@ -822,7 +769,7 @@ pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span>
// and `_1` is the `Place` for `somenum`.
//
// If and when the Issue is resolved, remove this special case match pattern:
- StatementKind::FakeRead(box (cause, _)) if cause == FakeReadCause::ForGuardBinding => None,
+ StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
// Retain spans from all other statements
StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
@@ -867,8 +814,8 @@ pub(super) fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Sp
}
// Retain spans from all other terminators
- TerminatorKind::Resume
- | TerminatorKind::Terminate
+ TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Yield { .. }
| TerminatorKind::GeneratorDrop
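The rewritten sort in `spans.rs` above replaces the nested `if`/`else` comparator with a chain of `Ordering::then_with` calls: span start ascending, span end descending, dominator order, then closures first. The same pattern, reduced to a standalone sketch with a simplified span type (no dominator tie-break, since that needs the coverage graph):

```rust
// Standalone sketch (simplified types, not the rustc CoverageSpan):
// chained comparators via `Ordering::then_with`.
#[derive(Debug, PartialEq)]
struct Span {
    lo: u32,
    hi: u32,
    is_closure: bool,
}

fn sort_spans(spans: &mut [Span]) {
    spans.sort_by(|a, b| {
        // First sort by span start.
        Ord::cmp(&a.lo, &b.lo)
            // Same start: longer spans (larger `hi`) come first.
            .then_with(|| Ord::cmp(&a.hi, &b.hi).reverse())
            // Otherwise identical: closure spans first.
            .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
    });
}

fn main() {
    let mut spans = vec![
        Span { lo: 5, hi: 6, is_closure: false },
        Span { lo: 5, hi: 9, is_closure: false },
        Span { lo: 1, hi: 3, is_closure: false },
        Span { lo: 5, hi: 6, is_closure: true },
    ];
    sort_spans(&mut spans);
    assert_eq!(spans[0], Span { lo: 1, hi: 3, is_closure: false });
    assert_eq!(spans[1], Span { lo: 5, hi: 9, is_closure: false });
    assert_eq!(spans[2], Span { lo: 5, hi: 6, is_closure: true });
    assert_eq!(spans[3], Span { lo: 5, hi: 6, is_closure: false });
}
```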
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 8f4dc9f69..7b14fef61 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -3,17 +3,19 @@
//! Currently, this pass only propagates scalar values.
use rustc_const_eval::const_eval::CheckAlignment;
-use rustc_const_eval::interpret::{ConstValue, ImmTy, Immediate, InterpCx, Scalar};
+use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def::DefKind;
-use rustc_middle::mir::visit::{MutVisitor, Visitor};
+use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult, Scalar};
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_mir_dataflow::value_analysis::{
- Map, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
+ Map, PlaceIndex, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
};
use rustc_mir_dataflow::{lattice::FlatSet, Analysis, Results, ResultsVisitor};
+use rustc_span::def_id::DefId;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, FieldIdx, VariantIdx};
@@ -50,7 +52,7 @@ impl<'tcx> MirPass<'tcx> for DataflowConstProp {
let place_limit = if tcx.sess.mir_opt_level() < 4 { Some(PLACE_LIMIT) } else { None };
// Decide which places to track during the analysis.
- let map = Map::from_filter(tcx, body, Ty::is_scalar, place_limit);
+ let map = Map::new(tcx, body, place_limit);
// Perform the actual dataflow analysis.
let analysis = ConstAnalysis::new(tcx, body, map);
@@ -58,9 +60,10 @@ impl<'tcx> MirPass<'tcx> for DataflowConstProp {
.in_scope(|| analysis.wrap().into_engine(tcx, body).iterate_to_fixpoint());
// Collect results and patch the body afterwards.
- let mut visitor = CollectAndPatch::new(tcx);
+ let mut visitor = Collector::new(tcx, &body.local_decls);
debug_span!("collect").in_scope(|| results.visit_reachable_with(body, &mut visitor));
- debug_span!("patch").in_scope(|| visitor.visit_body(body));
+ let mut patch = visitor.patch;
+ debug_span!("patch").in_scope(|| patch.visit_body_preserves_cfg(body));
}
}
@@ -73,7 +76,7 @@ struct ConstAnalysis<'a, 'tcx> {
}
impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
- type Value = FlatSet<ScalarTy<'tcx>>;
+ type Value = FlatSet<Scalar>;
const NAME: &'static str = "ConstAnalysis";
@@ -107,6 +110,18 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
state: &mut State<Self::Value>,
) {
match rvalue {
+ Rvalue::Use(operand) => {
+ state.flood(target.as_ref(), self.map());
+ if let Some(target) = self.map.find(target.as_ref()) {
+ self.assign_operand(state, target, operand);
+ }
+ }
+ Rvalue::CopyForDeref(rhs) => {
+ state.flood(target.as_ref(), self.map());
+ if let Some(target) = self.map.find(target.as_ref()) {
+ self.assign_operand(state, target, &Operand::Copy(*rhs));
+ }
+ }
Rvalue::Aggregate(kind, operands) => {
// If we assign `target = Enum::Variant#0(operand)`,
// we must make sure that all `target as Variant#i` are `Top`.
@@ -134,8 +149,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
variant_target_idx,
TrackElem::Field(FieldIdx::from_usize(field_index)),
) {
- let result = self.handle_operand(operand, state);
- state.insert_idx(field, result, self.map());
+ self.assign_operand(state, field, operand);
}
}
}
@@ -172,9 +186,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
if let Some(overflow_target) = overflow_target {
let overflow = match overflow {
FlatSet::Top => FlatSet::Top,
- FlatSet::Elem(overflow) => {
- self.wrap_scalar(Scalar::from_bool(overflow), self.tcx.types.bool)
- }
+ FlatSet::Elem(overflow) => FlatSet::Elem(Scalar::from_bool(overflow)),
FlatSet::Bottom => FlatSet::Bottom,
};
// We have flooded `target` earlier.
@@ -182,6 +194,23 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
}
}
}
+ Rvalue::Cast(
+ CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize),
+ operand,
+ _,
+ ) => {
+ let pointer = self.handle_operand(operand, state);
+ state.assign(target.as_ref(), pointer, self.map());
+
+ if let Some(target_len) = self.map().find_len(target.as_ref())
+ && let operand_ty = operand.ty(self.local_decls, self.tcx)
+ && let Some(operand_ty) = operand_ty.builtin_deref(true)
+ && let ty::Array(_, len) = operand_ty.ty.kind()
+ && let Some(len) = Const::Ty(*len).try_eval_scalar_int(self.tcx, self.param_env)
+ {
+ state.insert_value_idx(target_len, FlatSet::Elem(len.into()), self.map());
+ }
+ }
_ => self.super_assign(target, rvalue, state),
}
}
@@ -191,60 +220,94 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
rvalue: &Rvalue<'tcx>,
state: &mut State<Self::Value>,
) -> ValueOrPlace<Self::Value> {
- match rvalue {
- Rvalue::Cast(
- kind @ (CastKind::IntToInt
- | CastKind::FloatToInt
- | CastKind::FloatToFloat
- | CastKind::IntToFloat),
- operand,
- ty,
- ) => match self.eval_operand(operand, state) {
- FlatSet::Elem(op) => match kind {
- CastKind::IntToInt | CastKind::IntToFloat => {
- self.ecx.int_to_int_or_float(&op, *ty)
- }
- CastKind::FloatToInt | CastKind::FloatToFloat => {
- self.ecx.float_to_float_or_int(&op, *ty)
- }
- _ => unreachable!(),
+ let val = match rvalue {
+ Rvalue::Len(place) => {
+ let place_ty = place.ty(self.local_decls, self.tcx);
+ if let ty::Array(_, len) = place_ty.ty.kind() {
+ Const::Ty(*len)
+ .try_eval_scalar(self.tcx, self.param_env)
+ .map_or(FlatSet::Top, FlatSet::Elem)
+ } else if let [ProjectionElem::Deref] = place.projection[..] {
+ state.get_len(place.local.into(), self.map())
+ } else {
+ FlatSet::Top
}
- .map(|result| ValueOrPlace::Value(self.wrap_immediate(result, *ty)))
- .unwrap_or(ValueOrPlace::TOP),
- _ => ValueOrPlace::TOP,
- },
+ }
+ Rvalue::Cast(CastKind::IntToInt | CastKind::IntToFloat, operand, ty) => {
+ let Ok(layout) = self.tcx.layout_of(self.param_env.and(*ty)) else {
+ return ValueOrPlace::Value(FlatSet::Top);
+ };
+ match self.eval_operand(operand, state) {
+ FlatSet::Elem(op) => self
+ .ecx
+ .int_to_int_or_float(&op, layout)
+ .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
+ FlatSet::Bottom => FlatSet::Bottom,
+ FlatSet::Top => FlatSet::Top,
+ }
+ }
+ Rvalue::Cast(CastKind::FloatToInt | CastKind::FloatToFloat, operand, ty) => {
+ let Ok(layout) = self.tcx.layout_of(self.param_env.and(*ty)) else {
+ return ValueOrPlace::Value(FlatSet::Top);
+ };
+ match self.eval_operand(operand, state) {
+ FlatSet::Elem(op) => self
+ .ecx
+ .float_to_float_or_int(&op, layout)
+ .map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
+ FlatSet::Bottom => FlatSet::Bottom,
+ FlatSet::Top => FlatSet::Top,
+ }
+ }
+ Rvalue::Cast(CastKind::Transmute, operand, _) => {
+ match self.eval_operand(operand, state) {
+ FlatSet::Elem(op) => self.wrap_immediate(*op),
+ FlatSet::Bottom => FlatSet::Bottom,
+ FlatSet::Top => FlatSet::Top,
+ }
+ }
Rvalue::BinaryOp(op, box (left, right)) => {
// Overflows must be ignored here.
let (val, _overflow) = self.binary_op(state, *op, left, right);
- ValueOrPlace::Value(val)
+ val
}
Rvalue::UnaryOp(op, operand) => match self.eval_operand(operand, state) {
FlatSet::Elem(value) => self
.ecx
- .unary_op(*op, &value)
- .map(|val| ValueOrPlace::Value(self.wrap_immty(val)))
- .unwrap_or(ValueOrPlace::Value(FlatSet::Top)),
- FlatSet::Bottom => ValueOrPlace::Value(FlatSet::Bottom),
- FlatSet::Top => ValueOrPlace::Value(FlatSet::Top),
+ .wrapping_unary_op(*op, &value)
+ .map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
+ FlatSet::Bottom => FlatSet::Bottom,
+ FlatSet::Top => FlatSet::Top,
},
- Rvalue::Discriminant(place) => {
- ValueOrPlace::Value(state.get_discr(place.as_ref(), self.map()))
+ Rvalue::NullaryOp(null_op, ty) => {
+ let Ok(layout) = self.tcx.layout_of(self.param_env.and(*ty)) else {
+ return ValueOrPlace::Value(FlatSet::Top);
+ };
+ let val = match null_op {
+ NullOp::SizeOf if layout.is_sized() => layout.size.bytes(),
+ NullOp::AlignOf if layout.is_sized() => layout.align.abi.bytes(),
+ NullOp::OffsetOf(fields) => layout
+ .offset_of_subfield(&self.ecx, fields.iter().map(|f| f.index()))
+ .bytes(),
+ _ => return ValueOrPlace::Value(FlatSet::Top),
+ };
+ FlatSet::Elem(Scalar::from_target_usize(val, &self.tcx))
}
- _ => self.super_rvalue(rvalue, state),
- }
+ Rvalue::Discriminant(place) => state.get_discr(place.as_ref(), self.map()),
+ _ => return self.super_rvalue(rvalue, state),
+ };
+ ValueOrPlace::Value(val)
}
fn handle_constant(
&self,
- constant: &Constant<'tcx>,
+ constant: &ConstOperand<'tcx>,
_state: &mut State<Self::Value>,
) -> Self::Value {
constant
- .literal
- .eval(self.tcx, self.param_env)
- .try_to_scalar()
- .map(|value| FlatSet::Elem(ScalarTy(value, constant.ty())))
- .unwrap_or(FlatSet::Top)
+ .const_
+ .try_eval_scalar(self.tcx, self.param_env)
+ .map_or(FlatSet::Top, FlatSet::Elem)
}
fn handle_switch_int<'mir>(
@@ -261,9 +324,8 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
// We are branching on uninitialized data, this is UB, treat it as unreachable.
// This allows the set of visited edges to grow monotonically with the lattice.
FlatSet::Bottom => TerminatorEdges::None,
- FlatSet::Elem(ScalarTy(scalar, _)) => {
- let int = scalar.assert_int();
- let choice = int.assert_bits(int.size());
+ FlatSet::Elem(scalar) => {
+ let choice = scalar.assert_bits(scalar.size());
TerminatorEdges::Single(targets.target_for_value(choice))
}
FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
@@ -271,16 +333,6 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
}
}
-#[derive(Clone, PartialEq, Eq)]
-struct ScalarTy<'tcx>(Scalar, Ty<'tcx>);
-
-impl<'tcx> std::fmt::Debug for ScalarTy<'tcx> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- // This is used for dataflow visualization, so we return something more concise.
- std::fmt::Display::fmt(&ConstantKind::Val(ConstValue::Scalar(self.0), self.1), f)
- }
-}
-
impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, map: Map) -> Self {
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
@@ -293,34 +345,146 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
}
}
+ /// The caller must have flooded `place`.
+ fn assign_operand(
+ &self,
+ state: &mut State<FlatSet<Scalar>>,
+ place: PlaceIndex,
+ operand: &Operand<'tcx>,
+ ) {
+ match operand {
+ Operand::Copy(rhs) | Operand::Move(rhs) => {
+ if let Some(rhs) = self.map.find(rhs.as_ref()) {
+ state.insert_place_idx(place, rhs, &self.map);
+ } else if rhs.projection.first() == Some(&PlaceElem::Deref)
+ && let FlatSet::Elem(pointer) = state.get(rhs.local.into(), &self.map)
+ && let rhs_ty = self.local_decls[rhs.local].ty
+ && let Ok(rhs_layout) = self.tcx.layout_of(self.param_env.and(rhs_ty))
+ {
+ let op = ImmTy::from_scalar(pointer, rhs_layout).into();
+ self.assign_constant(state, place, op, &rhs.projection);
+ }
+ }
+ Operand::Constant(box constant) => {
+ if let Ok(constant) = self.ecx.eval_mir_constant(&constant.const_, None, None) {
+ self.assign_constant(state, place, constant, &[]);
+ }
+ }
+ }
+ }
+
+ /// The caller must have flooded `place`.
+ ///
+ /// Perform: `place = operand.projection`.
+ #[instrument(level = "trace", skip(self, state))]
+ fn assign_constant(
+ &self,
+ state: &mut State<FlatSet<Scalar>>,
+ place: PlaceIndex,
+ mut operand: OpTy<'tcx>,
+ projection: &[PlaceElem<'tcx>],
+ ) -> Option<!> {
+ for &(mut proj_elem) in projection {
+ if let PlaceElem::Index(index) = proj_elem {
+ if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
+ && let Ok(offset) = index.to_target_usize(&self.tcx)
+ && let Some(min_length) = offset.checked_add(1)
+ {
+ proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
+ } else {
+ return None;
+ }
+ }
+ operand = self.ecx.project(&operand, proj_elem).ok()?;
+ }
+
+ self.map.for_each_projection_value(
+ place,
+ operand,
+ &mut |elem, op| match elem {
+ TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
+ TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
+ TrackElem::Discriminant => {
+ let variant = self.ecx.read_discriminant(op).ok()?;
+ let discr_value = self.ecx.discriminant_for_variant(op.layout, variant).ok()?;
+ Some(discr_value.into())
+ }
+ TrackElem::DerefLen => {
+ let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
+ let len_usize = op.len(&self.ecx).ok()?;
+ let layout =
+ self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap();
+ Some(ImmTy::from_uint(len_usize, layout).into())
+ }
+ },
+ &mut |place, op| {
+ if let Ok(imm) = self.ecx.read_immediate_raw(op)
+ && let Some(imm) = imm.right()
+ {
+ let elem = self.wrap_immediate(*imm);
+ state.insert_value_idx(place, elem, &self.map);
+ }
+ },
+ );
+
+ None
+ }
+
fn binary_op(
&self,
- state: &mut State<FlatSet<ScalarTy<'tcx>>>,
+ state: &mut State<FlatSet<Scalar>>,
op: BinOp,
left: &Operand<'tcx>,
right: &Operand<'tcx>,
- ) -> (FlatSet<ScalarTy<'tcx>>, FlatSet<bool>) {
+ ) -> (FlatSet<Scalar>, FlatSet<bool>) {
let left = self.eval_operand(left, state);
let right = self.eval_operand(right, state);
+
match (left, right) {
+ (FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
+ // Both sides are known, do the actual computation.
(FlatSet::Elem(left), FlatSet::Elem(right)) => {
match self.ecx.overflowing_binary_op(op, &left, &right) {
- Ok((val, overflow, ty)) => (self.wrap_scalar(val, ty), FlatSet::Elem(overflow)),
+ Ok((val, overflow)) => {
+ (FlatSet::Elem(val.to_scalar()), FlatSet::Elem(overflow))
+ }
_ => (FlatSet::Top, FlatSet::Top),
}
}
- (FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
- (_, _) => {
- // Could attempt some algebraic simplifications here.
- (FlatSet::Top, FlatSet::Top)
+ // Exactly one side is known, attempt some algebraic simplifications.
+ (FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
+ let layout = const_arg.layout;
+ if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
+ return (FlatSet::Top, FlatSet::Top);
+ }
+
+ let arg_scalar = const_arg.to_scalar();
+ let Ok(arg_value) = arg_scalar.to_bits(layout.size) else {
+ return (FlatSet::Top, FlatSet::Top);
+ };
+
+ match op {
+ BinOp::BitAnd if arg_value == 0 => (FlatSet::Elem(arg_scalar), FlatSet::Bottom),
+ BinOp::BitOr
+ if arg_value == layout.size.truncate(u128::MAX)
+ || (layout.ty.is_bool() && arg_value == 1) =>
+ {
+ (FlatSet::Elem(arg_scalar), FlatSet::Bottom)
+ }
+ BinOp::Mul if layout.ty.is_integral() && arg_value == 0 => {
+ (FlatSet::Elem(arg_scalar), FlatSet::Elem(false))
+ }
+ _ => (FlatSet::Top, FlatSet::Top),
+ }
}
+ (FlatSet::Top, FlatSet::Top) => (FlatSet::Top, FlatSet::Top),
}
}
fn eval_operand(
&self,
op: &Operand<'tcx>,
- state: &mut State<FlatSet<ScalarTy<'tcx>>>,
+ state: &mut State<FlatSet<Scalar>>,
) -> FlatSet<ImmTy<'tcx>> {
let value = match self.handle_operand(op, state) {
ValueOrPlace::Value(value) => value,
@@ -328,80 +492,89 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
};
match value {
FlatSet::Top => FlatSet::Top,
- FlatSet::Elem(ScalarTy(scalar, ty)) => self
- .tcx
- .layout_of(self.param_env.and(ty))
- .map(|layout| FlatSet::Elem(ImmTy::from_scalar(scalar, layout)))
- .unwrap_or(FlatSet::Top),
+ FlatSet::Elem(scalar) => {
+ let ty = op.ty(self.local_decls, self.tcx);
+ self.tcx.layout_of(self.param_env.and(ty)).map_or(FlatSet::Top, |layout| {
+ FlatSet::Elem(ImmTy::from_scalar(scalar.into(), layout))
+ })
+ }
FlatSet::Bottom => FlatSet::Bottom,
}
}
- fn eval_discriminant(
- &self,
- enum_ty: Ty<'tcx>,
- variant_index: VariantIdx,
- ) -> Option<ScalarTy<'tcx>> {
+ fn eval_discriminant(&self, enum_ty: Ty<'tcx>, variant_index: VariantIdx) -> Option<Scalar> {
if !enum_ty.is_enum() {
return None;
}
- let discr = enum_ty.discriminant_for_variant(self.tcx, variant_index)?;
- let discr_layout = self.tcx.layout_of(self.param_env.and(discr.ty)).ok()?;
- let discr_value = Scalar::try_from_uint(discr.val, discr_layout.size)?;
- Some(ScalarTy(discr_value, discr.ty))
- }
-
- fn wrap_scalar(&self, scalar: Scalar, ty: Ty<'tcx>) -> FlatSet<ScalarTy<'tcx>> {
- FlatSet::Elem(ScalarTy(scalar, ty))
+ let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?;
+ let discr_value = self.ecx.discriminant_for_variant(enum_ty_layout, variant_index).ok()?;
+ Some(discr_value.to_scalar())
}
- fn wrap_immediate(&self, imm: Immediate, ty: Ty<'tcx>) -> FlatSet<ScalarTy<'tcx>> {
+ fn wrap_immediate(&self, imm: Immediate) -> FlatSet<Scalar> {
match imm {
- Immediate::Scalar(scalar) => self.wrap_scalar(scalar, ty),
+ Immediate::Scalar(scalar) => FlatSet::Elem(scalar),
+ Immediate::Uninit => FlatSet::Bottom,
_ => FlatSet::Top,
}
}
-
- fn wrap_immty(&self, val: ImmTy<'tcx>) -> FlatSet<ScalarTy<'tcx>> {
- self.wrap_immediate(*val, val.layout.ty)
- }
}
-struct CollectAndPatch<'tcx> {
+pub(crate) struct Patch<'tcx> {
tcx: TyCtxt<'tcx>,
/// For a given MIR location, this stores the values of the operands used by that location. In
/// particular, this is before the effect, such that the operands of `_1 = _1 + _2` are
/// properly captured. (This may become UB soon, but it is currently emitted even by safe code.)
- before_effect: FxHashMap<(Location, Place<'tcx>), ScalarTy<'tcx>>,
+ pub(crate) before_effect: FxHashMap<(Location, Place<'tcx>), Const<'tcx>>,
/// Stores the assigned values for assignments where the Rvalue is constant.
- assignments: FxHashMap<Location, ScalarTy<'tcx>>,
+ pub(crate) assignments: FxHashMap<Location, Const<'tcx>>,
}
-impl<'tcx> CollectAndPatch<'tcx> {
- fn new(tcx: TyCtxt<'tcx>) -> Self {
+impl<'tcx> Patch<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>) -> Self {
Self { tcx, before_effect: FxHashMap::default(), assignments: FxHashMap::default() }
}
- fn make_operand(&self, scalar: ScalarTy<'tcx>) -> Operand<'tcx> {
- Operand::Constant(Box::new(Constant {
- span: DUMMY_SP,
- user_ty: None,
- literal: ConstantKind::Val(ConstValue::Scalar(scalar.0), scalar.1),
- }))
+ fn make_operand(&self, const_: Const<'tcx>) -> Operand<'tcx> {
+ Operand::Constant(Box::new(ConstOperand { span: DUMMY_SP, user_ty: None, const_ }))
+ }
+}
+
+struct Collector<'tcx, 'locals> {
+ patch: Patch<'tcx>,
+ local_decls: &'locals LocalDecls<'tcx>,
+}
+
+impl<'tcx, 'locals> Collector<'tcx, 'locals> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, local_decls: &'locals LocalDecls<'tcx>) -> Self {
+ Self { patch: Patch::new(tcx), local_decls }
+ }
+
+ fn try_make_constant(
+ &self,
+ place: Place<'tcx>,
+ state: &State<FlatSet<Scalar>>,
+ map: &Map,
+ ) -> Option<Const<'tcx>> {
+ let FlatSet::Elem(Scalar::Int(value)) = state.get(place.as_ref(), &map) else {
+ return None;
+ };
+ let ty = place.ty(self.local_decls, self.patch.tcx).ty;
+ Some(Const::Val(ConstValue::Scalar(value.into()), ty))
}
}
impl<'mir, 'tcx>
ResultsVisitor<'mir, 'tcx, Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>>
- for CollectAndPatch<'tcx>
+ for Collector<'tcx, '_>
{
- type FlowState = State<FlatSet<ScalarTy<'tcx>>>;
+ type FlowState = State<FlatSet<Scalar>>;
fn visit_statement_before_primary_effect(
&mut self,
- results: &Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
state: &Self::FlowState,
statement: &'mir Statement<'tcx>,
location: Location,
@@ -417,7 +590,7 @@ impl<'mir, 'tcx>
fn visit_statement_after_primary_effect(
&mut self,
- results: &Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
state: &Self::FlowState,
statement: &'mir Statement<'tcx>,
location: Location,
@@ -427,14 +600,8 @@ impl<'mir, 'tcx>
// Don't overwrite the assignment if it already uses a constant (to keep the span).
}
StatementKind::Assign(box (place, _)) => {
- match state.get(place.as_ref(), &results.analysis.0.map) {
- FlatSet::Top => (),
- FlatSet::Elem(value) => {
- self.assignments.insert(location, value);
- }
- FlatSet::Bottom => {
- // This assignment is either unreachable, or an uninitialized value is assigned.
- }
+ if let Some(value) = self.try_make_constant(place, state, &results.analysis.0.map) {
+ self.patch.assignments.insert(location, value);
}
}
_ => (),
@@ -443,7 +610,7 @@ impl<'mir, 'tcx>
fn visit_terminator_before_primary_effect(
&mut self,
- results: &Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
+ results: &mut Results<'tcx, ValueAnalysisWrapper<ConstAnalysis<'_, 'tcx>>>,
state: &Self::FlowState,
terminator: &'mir Terminator<'tcx>,
location: Location,
@@ -453,8 +620,8 @@ impl<'mir, 'tcx>
}
}
-impl<'tcx> MutVisitor<'tcx> for CollectAndPatch<'tcx> {
- fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
@@ -462,7 +629,7 @@ impl<'tcx> MutVisitor<'tcx> for CollectAndPatch<'tcx> {
if let Some(value) = self.assignments.get(&location) {
match &mut statement.kind {
StatementKind::Assign(box (_, rvalue)) => {
- *rvalue = Rvalue::Use(self.make_operand(value.clone()));
+ *rvalue = Rvalue::Use(self.make_operand(*value));
}
_ => bug!("found assignment info for non-assign statement"),
}
@@ -475,33 +642,61 @@ impl<'tcx> MutVisitor<'tcx> for CollectAndPatch<'tcx> {
match operand {
Operand::Copy(place) | Operand::Move(place) => {
if let Some(value) = self.before_effect.get(&(location, *place)) {
- *operand = self.make_operand(value.clone());
+ *operand = self.make_operand(*value);
+ } else if !place.projection.is_empty() {
+ self.super_operand(operand, location)
}
}
- _ => (),
+ Operand::Constant(_) => {}
+ }
+ }
+
+ fn process_projection_elem(
+ &mut self,
+ elem: PlaceElem<'tcx>,
+ location: Location,
+ ) -> Option<PlaceElem<'tcx>> {
+ if let PlaceElem::Index(local) = elem {
+ let offset = self.before_effect.get(&(location, local.into()))?;
+ let offset = offset.try_to_scalar()?;
+ let offset = offset.to_target_usize(&self.tcx).ok()?;
+ let min_length = offset.checked_add(1)?;
+ Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false })
+ } else {
+ None
}
}
}
-struct OperandCollector<'tcx, 'map, 'a> {
- state: &'a State<FlatSet<ScalarTy<'tcx>>>,
- visitor: &'a mut CollectAndPatch<'tcx>,
+struct OperandCollector<'tcx, 'map, 'locals, 'a> {
+ state: &'a State<FlatSet<Scalar>>,
+ visitor: &'a mut Collector<'tcx, 'locals>,
map: &'map Map,
}
-impl<'tcx, 'map, 'a> Visitor<'tcx> for OperandCollector<'tcx, 'map, 'a> {
+impl<'tcx> Visitor<'tcx> for OperandCollector<'tcx, '_, '_, '_> {
+ fn visit_projection_elem(
+ &mut self,
+ _: PlaceRef<'tcx>,
+ elem: PlaceElem<'tcx>,
+ _: PlaceContext,
+ location: Location,
+ ) {
+ if let PlaceElem::Index(local) = elem
+ && let Some(value) = self.visitor.try_make_constant(local.into(), self.state, self.map)
+ {
+ self.visitor.patch.before_effect.insert((location, local.into()), value);
+ }
+ }
+
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
- match operand {
- Operand::Copy(place) | Operand::Move(place) => {
- match self.state.get(place.as_ref(), self.map) {
- FlatSet::Top => (),
- FlatSet::Elem(value) => {
- self.visitor.before_effect.insert((location, *place), value);
- }
- FlatSet::Bottom => (),
- }
+ if let Some(place) = operand.place() {
+ if let Some(value) = self.visitor.try_make_constant(place, self.state, self.map) {
+ self.visitor.patch.before_effect.insert((location, place), value);
+ } else if !place.projection.is_empty() {
+ // Try to propagate into `Index` projections.
+ self.super_operand(operand, location)
}
- _ => (),
}
}
}
@@ -513,8 +708,11 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
type MemoryKind = !;
const PANIC_ON_ALLOC_FAIL: bool = true;
+ #[inline(always)]
fn enforce_alignment(_ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
- unimplemented!()
+ // We do not check for alignment to avoid having to carry an `Align`
+ // in `ConstValue::ByRef`.
+ CheckAlignment::No
}
fn enforce_validity(_ecx: &InterpCx<'mir, 'tcx, Self>, _layout: TyAndLayout<'tcx>) -> bool {
@@ -529,6 +727,27 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
unimplemented!()
}
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_id: AllocId,
+ alloc: ConstAllocation<'tcx>,
+ _static_def_id: Option<DefId>,
+ is_write: bool,
+ ) -> InterpResult<'tcx> {
+ if is_write {
+ crate::const_prop::throw_machine_stop_str!("can't write to global");
+ }
+
+ // If the static allocation is mutable, then we can't const prop it as its content
+ // might be different at runtime.
+ if alloc.inner().mutability.is_mut() {
+ crate::const_prop::throw_machine_stop_str!("can't access mutable globals in ConstProp");
+ }
+
+ Ok(())
+ }
+
fn find_mir_or_eval_fn(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
@@ -541,6 +760,13 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
unimplemented!()
}
+ fn panic_nounwind(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _msg: &str,
+ ) -> interpret::InterpResult<'tcx> {
+ unimplemented!()
+ }
+
fn call_intrinsic(
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
@@ -565,8 +791,8 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
_bin_op: BinOp,
_left: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
_right: &rustc_const_eval::interpret::ImmTy<'tcx, Self::Provenance>,
- ) -> interpret::InterpResult<'tcx, (interpret::Scalar<Self::Provenance>, bool, Ty<'tcx>)> {
- throw_unsup!(Unsupported("".into()))
+ ) -> interpret::InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)> {
+ crate::const_prop::throw_machine_stop_str!("can't do pointer arithmetic");
}
fn expose_ptr(
@@ -590,7 +816,8 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
_ecx: &'a InterpCx<'mir, 'tcx, Self>,
) -> &'a [rustc_const_eval::interpret::Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>]
{
- unimplemented!()
+ // Return an empty stack instead of panicking, as `cur_span` uses it to evaluate constants.
+ &[]
}
fn stack_mut<'a>(
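One notable addition in the `dataflow_const_prop.rs` changes above is the one-sided algebraic simplification in `binary_op`: when exactly one operand is a known constant, absorbing elements such as `x & 0`, `x | !0`, and `x * 0` still yield a known result. A standalone sketch of that idea over a simple flat lattice (plain `u64` values instead of the interpreter's `Scalar`):

```rust
// Standalone sketch (not the rustc lattice types): a flat lattice over u64 plus
// the one-sided simplifications used above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum FlatSet {
    Bottom,    // unreachable / no value
    Elem(u64), // exactly this value
    Top,       // unknown
}

#[derive(Clone, Copy)]
enum BinOp {
    BitAnd,
    BitOr,
    Mul,
}

fn binary_op(op: BinOp, left: FlatSet, right: FlatSet) -> FlatSet {
    use FlatSet::*;
    match (left, right) {
        (Bottom, _) | (_, Bottom) => Bottom,
        // Both sides known: just compute.
        (Elem(l), Elem(r)) => Elem(match op {
            BinOp::BitAnd => l & r,
            BinOp::BitOr => l | r,
            BinOp::Mul => l.wrapping_mul(r),
        }),
        // Exactly one side known: absorbing elements still give a result.
        (Elem(c), Top) | (Top, Elem(c)) => match op {
            BinOp::BitAnd if c == 0 => Elem(0),
            BinOp::BitOr if c == u64::MAX => Elem(u64::MAX),
            BinOp::Mul if c == 0 => Elem(0),
            _ => Top,
        },
        (Top, Top) => Top,
    }
}

fn main() {
    assert_eq!(binary_op(BinOp::BitAnd, FlatSet::Top, FlatSet::Elem(0)), FlatSet::Elem(0));
    assert_eq!(binary_op(BinOp::Mul, FlatSet::Elem(0), FlatSet::Top), FlatSet::Elem(0));
    assert_eq!(binary_op(BinOp::BitOr, FlatSet::Top, FlatSet::Elem(1)), FlatSet::Top);
}
```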
diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
index 3f988930b..ef1410504 100644
--- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs
+++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
@@ -12,6 +12,7 @@
//! will still not cause any further changes.
//!
+use crate::util::is_within_packed;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;
@@ -49,6 +50,11 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS
&& !place.is_indirect()
&& !borrowed.contains(place.local)
&& !state.contains(place.local)
+ // If `place` is a projection of a misaligned field in a packed ADT,
+ // the move may be codegened as a pointer to that field.
+ // Using that misaligned pointer may trigger UB in the callee,
+ // so do nothing.
+ && is_within_packed(tcx, body, place).is_none()
{
call_operands_to_move.push((bb, index));
}
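The guard added above skips call operands that live inside a `#[repr(packed)]` ADT because such fields may be under-aligned, and lowering the move as a pointer to the field could hand the callee a misaligned pointer. A standalone sketch of the underlying hazard, using `addr_of!` and an unaligned read rather than an ordinary reference:

```rust
// Standalone sketch: fields of a `#[repr(packed)]` struct may be under-aligned,
// so the only sound way to take their address is a raw pointer via `addr_of!`
// followed by an unaligned read; an ordinary `&` reference is not allowed.
use std::ptr;

#[repr(C, packed)]
struct Packed {
    a: u8,
    b: u32, // sits at offset 1, i.e. misaligned for u32
}

fn main() {
    let p = Packed { a: 1, b: 0xdead_beef };
    // `let r = &p.b;` is rejected by the compiler: the reference could be misaligned.
    let raw = ptr::addr_of!(p.b);
    let value = unsafe { raw.read_unaligned() };
    assert_eq!(value, 0xdead_beef);
}
```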
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
index 909116a77..666293cbc 100644
--- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
+++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
@@ -150,7 +150,7 @@ fn rvalue_hash<H: Hasher>(hasher: &mut H, rvalue: &Rvalue<'_>) {
fn operand_hash<H: Hasher>(hasher: &mut H, operand: &Operand<'_>) {
match operand {
- Operand::Constant(box Constant { user_ty: _, literal, span: _ }) => literal.hash(hasher),
+ Operand::Constant(box ConstOperand { user_ty: _, const_, span: _ }) => const_.hash(hasher),
x => x.hash(hasher),
};
}
@@ -179,9 +179,9 @@ fn rvalue_eq<'tcx>(lhs: &Rvalue<'tcx>, rhs: &Rvalue<'tcx>) -> bool {
fn operand_eq<'tcx>(lhs: &Operand<'tcx>, rhs: &Operand<'tcx>) -> bool {
let res = match (lhs, rhs) {
(
- Operand::Constant(box Constant { user_ty: _, literal, span: _ }),
- Operand::Constant(box Constant { user_ty: _, literal: literal2, span: _ }),
- ) => literal == literal2,
+ Operand::Constant(box ConstOperand { user_ty: _, const_, span: _ }),
+ Operand::Constant(box ConstOperand { user_ty: _, const_: const2, span: _ }),
+ ) => const_ == const2,
(x, y) => x == y,
};
debug!("operand_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
index b73b72c31..d9a132e5c 100644
--- a/compiler/rustc_mir_transform/src/dest_prop.rs
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -647,8 +647,8 @@ impl WriteInfo {
}
}
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable { .. } => (),
TerminatorKind::Drop { .. } => {
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index b6b1ae6d3..b62d7da2a 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -170,6 +170,7 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
self.ctxt.param_env()
}
+ #[instrument(level = "debug", skip(self), ret)]
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
let ((maybe_live, maybe_dead), multipart) = match mode {
DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false),
@@ -362,8 +363,13 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
UnwindAction::Unreachable => {
Unwind::To(self.patch.unreachable_cleanup_block())
}
- UnwindAction::Terminate => {
- Unwind::To(self.patch.terminate_block())
+ UnwindAction::Terminate(reason) => {
+ debug_assert_ne!(
+ reason,
+ UnwindTerminateReason::InCleanup,
+ "we are not in a cleanup block, InCleanup reason should be impossible"
+ );
+ Unwind::To(self.patch.terminate_block(reason))
}
}
};
@@ -397,10 +403,10 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
- Rvalue::Use(Operand::Constant(Box::new(Constant {
+ Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span,
user_ty: None,
- literal: ConstantKind::from_bool(self.tcx, val),
+ const_: Const::from_bool(self.tcx, val),
})))
}
@@ -470,7 +476,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
// drop elaboration should handle that by itself
continue;
}
- TerminatorKind::Resume => {
+ TerminatorKind::UnwindResume => {
// It is possible for `Resume` to be patched
// (in particular it can be patched to be replaced with
// a Goto; see `MirPatch::new`).
@@ -496,7 +502,8 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
if let TerminatorKind::Call {
destination,
target: Some(_),
- unwind: UnwindAction::Continue | UnwindAction::Unreachable | UnwindAction::Terminate,
+ unwind:
+ UnwindAction::Continue | UnwindAction::Unreachable | UnwindAction::Terminate(_),
..
} = data.terminator().kind
{
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index 4b796d79e..5879a8039 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -4,7 +4,9 @@ use rustc_errors::{
};
use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_middle::mir::{AssertKind, UnsafetyViolationDetails};
+use rustc_middle::ty::TyCtxt;
use rustc_session::lint::{self, Lint};
+use rustc_span::def_id::DefId;
use rustc_span::Span;
#[derive(LintDiagnostic)]
@@ -237,20 +239,38 @@ pub(crate) struct FnItemRef {
pub ident: String,
}
-#[derive(LintDiagnostic)]
-#[diag(mir_transform_must_not_suspend)]
-pub(crate) struct MustNotSupend<'a> {
- #[label]
+pub(crate) struct MustNotSupend<'tcx, 'a> {
+ pub tcx: TyCtxt<'tcx>,
pub yield_sp: Span,
- #[subdiagnostic]
pub reason: Option<MustNotSuspendReason>,
- #[help]
pub src_sp: Span,
pub pre: &'a str,
- pub def_path: String,
+ pub def_id: DefId,
pub post: &'a str,
}
+// Needed for def_path_str
+impl<'a> DecorateLint<'a, ()> for MustNotSupend<'_, '_> {
+ fn decorate_lint<'b>(
+ self,
+ diag: &'b mut rustc_errors::DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut rustc_errors::DiagnosticBuilder<'a, ()> {
+ diag.span_label(self.yield_sp, crate::fluent_generated::_subdiag::label);
+ if let Some(reason) = self.reason {
+ diag.subdiagnostic(reason);
+ }
+ diag.span_help(self.src_sp, crate::fluent_generated::_subdiag::help);
+ diag.set_arg("pre", self.pre);
+ diag.set_arg("def_path", self.tcx.def_path_str(self.def_id));
+ diag.set_arg("post", self.post);
+ diag
+ }
+
+ fn msg(&self) -> rustc_errors::DiagnosticMessage {
+ crate::fluent_generated::mir_transform_must_not_suspend
+ }
+}
+
#[derive(Subdiagnostic)]
#[note(mir_transform_note)]
pub(crate) struct MustNotSuspendReason {
@@ -258,10 +278,3 @@ pub(crate) struct MustNotSuspendReason {
pub span: Span,
pub reason: String,
}
-
-#[derive(Diagnostic)]
-#[diag(mir_transform_simd_shuffle_last_const)]
-pub(crate) struct SimdShuffleLastConst {
- #[primary_span]
- pub span: Span,
-}
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
index ff4822f33..e261b8ac2 100644
--- a/compiler/rustc_mir_transform/src/generator.rs
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -583,6 +583,14 @@ struct LivenessInfo {
storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>,
}
+/// Computes which locals have to be stored in the state-machine for the
+/// given coroutine.
+///
+/// The basic idea is as follows:
+/// - a local is live until we encounter a `StorageDead` statement. In
+/// case none exist, the local is considered to be always live.
+/// - a local has to be stored if it is either directly used after the
+/// suspend point, or if it is live and has been previously borrowed.
fn locals_live_across_suspend_points<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
@@ -814,7 +822,7 @@ impl<'mir, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx, R>
fn visit_statement_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
state: &Self::FlowState,
_statement: &'mir Statement<'tcx>,
loc: Location,
@@ -824,7 +832,7 @@ impl<'mir, 'tcx, R> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx, R>
fn visit_terminator_before_primary_effect(
&mut self,
- _results: &R,
+ _results: &mut R,
state: &Self::FlowState,
_terminator: &'mir Terminator<'tcx>,
loc: Location,
@@ -853,60 +861,7 @@ impl StorageConflictVisitor<'_, '_, '_> {
}
}
-/// Validates the typeck view of the generator against the actual set of types saved between
-/// yield points.
-fn sanitize_witness<'tcx>(
- tcx: TyCtxt<'tcx>,
- body: &Body<'tcx>,
- witness: Ty<'tcx>,
- upvars: &'tcx ty::List<Ty<'tcx>>,
- layout: &GeneratorLayout<'tcx>,
-) {
- let did = body.source.def_id();
- let param_env = tcx.param_env(did);
-
- let allowed_upvars = tcx.normalize_erasing_regions(param_env, upvars);
- let allowed = match witness.kind() {
- &ty::GeneratorWitness(interior_tys) => {
- tcx.normalize_erasing_late_bound_regions(param_env, interior_tys)
- }
- _ => {
- tcx.sess.delay_span_bug(
- body.span,
- format!("unexpected generator witness type {:?}", witness.kind()),
- );
- return;
- }
- };
-
- let mut mismatches = Vec::new();
- for fty in &layout.field_tys {
- if fty.ignore_for_traits {
- continue;
- }
- let decl_ty = tcx.normalize_erasing_regions(param_env, fty.ty);
-
- // Sanity check that typeck knows about the type of locals which are
- // live across a suspension point
- if !allowed.contains(&decl_ty) && !allowed_upvars.contains(&decl_ty) {
- mismatches.push(decl_ty);
- }
- }
-
- if !mismatches.is_empty() {
- span_bug!(
- body.span,
- "Broken MIR: generator contains type {:?} in MIR, \
- but typeck only knows about {} and {:?}",
- mismatches,
- allowed,
- allowed_upvars
- );
- }
-}
-
fn compute_layout<'tcx>(
- tcx: TyCtxt<'tcx>,
liveness: LivenessInfo,
body: &Body<'tcx>,
) -> (
@@ -932,27 +887,20 @@ fn compute_layout<'tcx>(
let decl = &body.local_decls[local];
debug!(?decl);
- let ignore_for_traits = if tcx.sess.opts.unstable_opts.drop_tracking_mir {
- // Do not `assert_crate_local` here, as post-borrowck cleanup may have already cleared
- // the information. This is alright, since `ignore_for_traits` is only relevant when
- // this code runs on pre-cleanup MIR, and `ignore_for_traits = false` is the safer
- // default.
- match decl.local_info {
- // Do not include raw pointers created from accessing `static` items, as those could
- // well be re-created by another access to the same static.
- ClearCrossCrate::Set(box LocalInfo::StaticRef { is_thread_local, .. }) => {
- !is_thread_local
- }
- // Fake borrows are only read by fake reads, so do not have any reality in
- // post-analysis MIR.
- ClearCrossCrate::Set(box LocalInfo::FakeBorrow) => true,
- _ => false,
+ // Do not `assert_crate_local` here, as post-borrowck cleanup may have already cleared
+ // the information. This is alright, since `ignore_for_traits` is only relevant when
+ // this code runs on pre-cleanup MIR, and `ignore_for_traits = false` is the safer
+ // default.
+ let ignore_for_traits = match decl.local_info {
+ // Do not include raw pointers created from accessing `static` items, as those could
+ // well be re-created by another access to the same static.
+ ClearCrossCrate::Set(box LocalInfo::StaticRef { is_thread_local, .. }) => {
+ !is_thread_local
}
- } else {
- // FIXME(#105084) HIR-based drop tracking does not account for all the temporaries that
- // MIR building may introduce. This leads to wrongly ignored types, but this is
- // necessary for internal consistency and to avoid ICEs.
- decl.internal
+ // Fake borrows are only read by fake reads, so do not have any reality in
+ // post-analysis MIR.
+ ClearCrossCrate::Set(box LocalInfo::FakeBorrow) => true,
+ _ => false,
};
let decl =
GeneratorSavedTy { ty: decl.ty, source_info: decl.source_info, ignore_for_traits };
@@ -1091,7 +1039,7 @@ fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
UnwindAction::Cleanup(tgt) => tgt,
UnwindAction::Continue => elaborator.patch.resume_block(),
UnwindAction::Unreachable => elaborator.patch.unreachable_cleanup_block(),
- UnwindAction::Terminate => elaborator.patch.terminate_block(),
+ UnwindAction::Terminate(reason) => elaborator.patch.terminate_block(reason),
})
};
elaborate_drop(
@@ -1189,10 +1137,10 @@ fn insert_panic_block<'tcx>(
) -> BasicBlock {
let assert_block = BasicBlock::new(body.basic_blocks.len());
let term = TerminatorKind::Assert {
- cond: Operand::Constant(Box::new(Constant {
+ cond: Operand::Constant(Box::new(ConstOperand {
span: body.span,
user_ty: None,
- literal: ConstantKind::from_bool(tcx, false),
+ const_: Const::from_bool(tcx, false),
})),
expected: true,
msg: Box::new(message),
@@ -1239,7 +1187,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
// These never unwind.
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
@@ -1248,7 +1196,7 @@ fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
// Resume will *continue* unwinding, but if there's no other unwinding terminator it
// will never be reached.
- TerminatorKind::Resume => {}
+ TerminatorKind::UnwindResume => {}
TerminatorKind::Yield { .. } => {
unreachable!("`can_unwind` called before generator transform")
@@ -1279,14 +1227,14 @@ fn create_generator_resume_function<'tcx>(
let source_info = SourceInfo::outermost(body.span);
let poison_block = body.basic_blocks_mut().push(BasicBlockData {
statements: vec![transform.set_discr(VariantIdx::new(POISONED), source_info)],
- terminator: Some(Terminator { source_info, kind: TerminatorKind::Resume }),
+ terminator: Some(Terminator { source_info, kind: TerminatorKind::UnwindResume }),
is_cleanup: true,
});
for (idx, block) in body.basic_blocks_mut().iter_enumerated_mut() {
let source_info = block.terminator().source_info;
- if let TerminatorKind::Resume = block.terminator().kind {
+ if let TerminatorKind::UnwindResume = block.terminator().kind {
// An existing `Resume` terminator is redirected to jump to our dedicated
// "poisoning block" above.
if idx != poison_block {
@@ -1445,8 +1393,6 @@ pub(crate) fn mir_generator_witnesses<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: LocalDefId,
) -> Option<GeneratorLayout<'tcx>> {
- assert!(tcx.sess.opts.unstable_opts.drop_tracking_mir);
-
let (body, _) = tcx.mir_promoted(def_id);
let body = body.borrow();
let body = &*body;
@@ -1454,22 +1400,21 @@ pub(crate) fn mir_generator_witnesses<'tcx>(
// The first argument is the generator type passed by value
let gen_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
- // Get the interior types and args which typeck computed
let movable = match *gen_ty.kind() {
ty::Generator(_, _, movability) => movability == hir::Movability::Movable,
ty::Error(_) => return None,
_ => span_bug!(body.span, "unexpected generator type {}", gen_ty),
};
- // When first entering the generator, move the resume argument into its new local.
- let always_live_locals = always_storage_live_locals(&body);
+ // The witness simply contains all locals live across suspend points.
+ let always_live_locals = always_storage_live_locals(&body);
let liveness_info = locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
// Extract locals which are live across suspension point into `layout`
// `remap` gives a mapping from local indices onto generator struct indices
// `storage_liveness` tells us which locals have live storage at suspension points
- let (_, generator_layout, _) = compute_layout(tcx, liveness_info, body);
+ let (_, generator_layout, _) = compute_layout(liveness_info, body);
check_suspend_tys(tcx, &generator_layout, &body);
@@ -1489,15 +1434,10 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
let gen_ty = body.local_decls.raw[1].ty;
// Get the discriminant type and args which typeck computed
- let (discr_ty, upvars, interior, movable) = match *gen_ty.kind() {
+ let (discr_ty, movable) = match *gen_ty.kind() {
ty::Generator(_, args, movability) => {
let args = args.as_generator();
- (
- args.discr_ty(tcx),
- args.upvar_tys(),
- args.witness(),
- movability == hir::Movability::Movable,
- )
+ (args.discr_ty(tcx), movability == hir::Movability::Movable)
}
_ => {
tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {gen_ty}"));
@@ -1574,13 +1514,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
// Extract locals which are live across suspension point into `layout`
// `remap` gives a mapping from local indices onto generator struct indices
// `storage_liveness` tells us which locals have live storage at suspension points
- let (remap, layout, storage_liveness) = compute_layout(tcx, liveness_info, body);
-
- if tcx.sess.opts.unstable_opts.validate_mir
- && !tcx.sess.opts.unstable_opts.drop_tracking_mir
- {
- sanitize_witness(tcx, body, interior, upvars, &layout);
- }
+ let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
let can_return = can_return(tcx, body, tcx.param_env(body.source.def_id()));
@@ -1758,8 +1692,8 @@ impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
TerminatorKind::Call { .. }
| TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
- | TerminatorKind::Resume
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
@@ -1954,11 +1888,12 @@ fn check_must_not_suspend_def(
hir_id,
data.source_span,
errors::MustNotSupend {
+ tcx,
yield_sp: data.yield_span,
reason,
src_sp: data.source_span,
pre: data.descr_pre,
- def_path: tcx.def_path_str(def_id),
+ def_id,
post: data.descr_post,
},
);
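The doc comment added to `locals_live_across_suspend_points` above says a local must be stored in the state machine if it is used after a suspend point, or if it is live and has been borrowed. A hand-rolled sketch of the first case (not compiler output, just an illustration of why such locals end up in the suspended variant):

```rust
// Standalone sketch: a local that is still needed after a suspend point must be
// stored in the state machine's variant; locals that die before it are not.
enum Counter {
    Start { limit: u32 },
    // `limit` and the running `current` value are live across the yield,
    // so both are stored in the suspended variant.
    Suspended { limit: u32, current: u32 },
    Done,
}

enum Step {
    Yielded(u32),
    Complete,
}

fn resume(state: Counter) -> (Counter, Step) {
    match state {
        Counter::Start { limit } => {
            let current = 0; // lives across the first yield, so it is saved below
            (Counter::Suspended { limit, current }, Step::Yielded(current))
        }
        Counter::Suspended { limit, current } => {
            let next = current + 1;
            if next < limit {
                (Counter::Suspended { limit, current: next }, Step::Yielded(next))
            } else {
                (Counter::Done, Step::Complete)
            }
        }
        Counter::Done => (Counter::Done, Step::Complete),
    }
}

fn main() {
    let mut state = Counter::Start { limit: 3 };
    loop {
        let (next, step) = resume(state);
        state = next;
        match step {
            Step::Yielded(n) => println!("yielded {n}"),
            Step::Complete => break,
        }
    }
}
```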
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
new file mode 100644
index 000000000..56bdc5a17
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -0,0 +1,539 @@
+//! Global value numbering.
+//!
+//! MIR may contain repeated and/or redundant computations. The objective of this pass is to detect
+//! such redundancies and re-use the already-computed result when possible.
+//!
+//! In a first pass, we compute a symbolic representation of values that are assigned to SSA
+//! locals. This symbolic representation is defined by the `Value` enum. Each produced instance of
+//! `Value` is interned as a `VnIndex`, which allows us to cheaply compute identical values.
+//!
+//! From those assignments, we construct a mapping `VnIndex -> Vec<(Local, Location)>` of available
+//! values, the locals in which they are stored, and the assignment location.
+//!
+//! In a second pass, we traverse all (non-SSA) assignments `x = rvalue` and operands. For each
+//! one, we compute the `VnIndex` of the rvalue. If this `VnIndex` is associated to a constant, we
+//! replace the rvalue/operand by that constant. Otherwise, if there is an SSA local `y`
+//! associated to this `VnIndex`, and if its definition location strictly dominates the assignment
+//! to `x`, we replace the assignment by `x = y`.
+//!
+//! Opportunistically, this pass simplifies some `Rvalue`s based on the accumulated knowledge.
+//!
+//! # Operational semantics
+//!
+//! Operationally, this pass attempts to prove bitwise equality between locals. Given this MIR:
+//! ```ignore (MIR)
+//! _a = some value // has VnIndex i
+//! // some MIR
+//! _b = some other value // also has VnIndex i
+//! ```
+//!
+//! We consider it to be replaceable by:
+//! ```ignore (MIR)
+//! _a = some value // has VnIndex i
+//! // some MIR
+//! _c = some other value // also has VnIndex i
+//! assume(_a bitwise equal to _c) // follows from having the same VnIndex
+//! _b = _a // follows from the `assume`
+//! ```
+//!
+//! Which is simplifiable to:
+//! ```ignore (MIR)
+//! _a = some value // has VnIndex i
+//! // some MIR
+//! _b = _a
+//! ```
+//!
+//! # Handling of references
+//!
+//! We handle references by assigning a different "provenance" index to each Ref/AddressOf rvalue.
+//! This ensures that we do not spuriously merge borrows that should not be merged. Meanwhile, we
+//! consider all the derefs of an immutable reference to a freeze type to give the same value:
+//! ```ignore (MIR)
+//! _a = *_b // _b is &Freeze
+//! _c = *_b // replaced by _c = _a
+//! ```
+
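For illustration only (this sketch is not part of the patch, and the `redundant` function is made up): a minimal Rust-source view of the redundancy the pass documented above targets. Both reads go through the same immutable reference to a `Freeze` type, so the two computations receive the same `VnIndex` and the second can be rewritten into a reuse of the first.

```rust
// Hypothetical example; not taken from the diff.
fn redundant(x: &u32) -> u32 {
    let a = *x + 1; // `*x` and `*x + 1` are value-numbered here
    let b = *x + 1; // same symbolic value as `a`; GVN can rewrite this into a copy of `a`
    a + b
}

fn main() {
    assert_eq!(redundant(&1), 4);
}
```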
+use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_macros::newtype_index;
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{VariantIdx, FIRST_VARIANT};
+
+use crate::ssa::SsaLocals;
+use crate::MirPass;
+
+pub struct GVN;
+
+impl<'tcx> MirPass<'tcx> for GVN {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ #[instrument(level = "trace", skip(self, tcx, body))]
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!(def_id = ?body.source.def_id());
+ propagate_ssa(tcx, body);
+ }
+}
+
+fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ let ssa = SsaLocals::new(body);
+ // Clone dominators as we need them while mutating the body.
+ let dominators = body.basic_blocks.dominators().clone();
+
+ let mut state = VnState::new(tcx, param_env, &ssa, &dominators, &body.local_decls);
+ for arg in body.args_iter() {
+ if ssa.is_ssa(arg) {
+ let value = state.new_opaque().unwrap();
+ state.assign(arg, value);
+ }
+ }
+
+ ssa.for_each_assignment_mut(&mut body.basic_blocks, |local, rvalue, location| {
+ let value = state.simplify_rvalue(rvalue, location).or_else(|| state.new_opaque()).unwrap();
+ // FIXME(#112651) the type of `rvalue` may be a subtype of the type of `local`. We can only
+ // mark `local` as reusable if we have an exact type match.
+ if state.local_decls[local].ty == rvalue.ty(state.local_decls, tcx) {
+ state.assign(local, value);
+ }
+ });
+
+ // Stop creating opaques during replacement as it is useless.
+ state.next_opaque = None;
+
+ let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec();
+ for bb in reverse_postorder {
+ let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb];
+ state.visit_basic_block_data(bb, data);
+ }
+ let any_replacement = state.any_replacement;
+
+ // For each local that is reused (`y` above), we remove its storage statements to avoid any
+ // difficulty. Those locals are SSA, so they should be easy for LLVM to optimize without
+ // storage statements.
+ StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body);
+
+ if any_replacement {
+ crate::simplify::remove_unused_definitions(body);
+ }
+}
+
+newtype_index! {
+ struct VnIndex {}
+}
+
+#[derive(Debug, PartialEq, Eq, Hash)]
+enum Value<'tcx> {
+ // Root values.
+ /// Used to represent values we know nothing about.
+ /// The `usize` is a counter incremented by `new_opaque`.
+ Opaque(usize),
+ /// Evaluated or unevaluated constant value.
+ Constant(Const<'tcx>),
+ /// An aggregate value, either tuple/closure/struct/enum.
+ /// This does not contain unions, as we cannot reason about their value.
+ Aggregate(Ty<'tcx>, VariantIdx, Vec<VnIndex>),
+ /// This corresponds to a `[value; count]` expression.
+ Repeat(VnIndex, ty::Const<'tcx>),
+ /// The address of a place.
+ Address {
+ place: Place<'tcx>,
+ /// Give each borrow and pointer a different provenance, so we don't merge them.
+ provenance: usize,
+ },
+
+ // Extractions.
+ /// This is the *value* obtained by projecting another value.
+ Projection(VnIndex, ProjectionElem<VnIndex, Ty<'tcx>>),
+ /// Discriminant of the given value.
+ Discriminant(VnIndex),
+ /// Length of an array or slice.
+ Len(VnIndex),
+
+ // Operations.
+ NullaryOp(NullOp<'tcx>, Ty<'tcx>),
+ UnaryOp(UnOp, VnIndex),
+ BinaryOp(BinOp, VnIndex, VnIndex),
+ CheckedBinaryOp(BinOp, VnIndex, VnIndex),
+ Cast {
+ kind: CastKind,
+ value: VnIndex,
+ from: Ty<'tcx>,
+ to: Ty<'tcx>,
+ },
+}
+
+struct VnState<'body, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ local_decls: &'body LocalDecls<'tcx>,
+ /// Value stored in each local.
+ locals: IndexVec<Local, Option<VnIndex>>,
+ /// First local to be assigned that value.
+ rev_locals: FxHashMap<VnIndex, Vec<Local>>,
+ values: FxIndexSet<Value<'tcx>>,
+ /// Counter to generate different values.
+ /// This is an option to stop creating opaques during replacement.
+ next_opaque: Option<usize>,
+ ssa: &'body SsaLocals,
+ dominators: &'body Dominators<BasicBlock>,
+ reused_locals: BitSet<Local>,
+ any_replacement: bool,
+}
+
+impl<'body, 'tcx> VnState<'body, 'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ssa: &'body SsaLocals,
+ dominators: &'body Dominators<BasicBlock>,
+ local_decls: &'body LocalDecls<'tcx>,
+ ) -> Self {
+ VnState {
+ tcx,
+ param_env,
+ local_decls,
+ locals: IndexVec::from_elem(None, local_decls),
+ rev_locals: FxHashMap::default(),
+ values: FxIndexSet::default(),
+ next_opaque: Some(0),
+ ssa,
+ dominators,
+ reused_locals: BitSet::new_empty(local_decls.len()),
+ any_replacement: false,
+ }
+ }
+
+ #[instrument(level = "trace", skip(self), ret)]
+ fn insert(&mut self, value: Value<'tcx>) -> VnIndex {
+ let (index, _) = self.values.insert_full(value);
+ VnIndex::from_usize(index)
+ }
+
+ /// Create a new `Value` for which we have no information at all, except that it is distinct
+ /// from all the others.
+ #[instrument(level = "trace", skip(self), ret)]
+ fn new_opaque(&mut self) -> Option<VnIndex> {
+ let next_opaque = self.next_opaque.as_mut()?;
+ let value = Value::Opaque(*next_opaque);
+ *next_opaque += 1;
+ Some(self.insert(value))
+ }
+
+ /// Create a new `Value::Address` distinct from all the others.
+ #[instrument(level = "trace", skip(self), ret)]
+ fn new_pointer(&mut self, place: Place<'tcx>) -> Option<VnIndex> {
+ let next_opaque = self.next_opaque.as_mut()?;
+ let value = Value::Address { place, provenance: *next_opaque };
+ *next_opaque += 1;
+ Some(self.insert(value))
+ }
+
+ fn get(&self, index: VnIndex) -> &Value<'tcx> {
+ self.values.get_index(index.as_usize()).unwrap()
+ }
+
+ /// Record that `local` is assigned `value`. `local` must be SSA.
+ #[instrument(level = "trace", skip(self))]
+ fn assign(&mut self, local: Local, value: VnIndex) {
+ self.locals[local] = Some(value);
+
+ // Only register the value if its type is `Sized`, as we will emit copies of it.
+ let is_sized = !self.tcx.features().unsized_locals
+ || self.local_decls[local].ty.is_sized(self.tcx, self.param_env);
+ if is_sized {
+ self.rev_locals.entry(value).or_default().push(local);
+ }
+ }
+
+ /// Represent the *value* which would be read from `place`, and point `place` to a preexisting
+ /// place with the same value (if that already exists).
+ #[instrument(level = "trace", skip(self), ret)]
+ fn simplify_place_value(
+ &mut self,
+ place: &mut Place<'tcx>,
+ location: Location,
+ ) -> Option<VnIndex> {
+ // Invariant: `place` and `place_ref` point to the same value, even if they point to
+ // different memory locations.
+ let mut place_ref = place.as_ref();
+
+ // Invariant: `value` holds the value up to the `index`th projection (excluded).
+ let mut value = self.locals[place.local]?;
+ for (index, proj) in place.projection.iter().enumerate() {
+ if let Some(local) = self.try_as_local(value, location) {
+ // Both `local` and `Place { local: place.local, projection: projection[..index] }`
+ // hold the same value. Therefore, the following place holds the value in the original
+ // `place`.
+ place_ref = PlaceRef { local, projection: &place.projection[index..] };
+ }
+
+ let proj = match proj {
+ ProjectionElem::Deref => {
+ let ty = Place::ty_from(
+ place.local,
+ &place.projection[..index],
+ self.local_decls,
+ self.tcx,
+ )
+ .ty;
+ if let Some(Mutability::Not) = ty.ref_mutability()
+ && let Some(pointee_ty) = ty.builtin_deref(true)
+ && pointee_ty.ty.is_freeze(self.tcx, self.param_env)
+ {
+ // An immutable borrow `_x` always points to the same value for the
+ // lifetime of the borrow, so we can merge all instances of `*_x`.
+ ProjectionElem::Deref
+ } else {
+ return None;
+ }
+ }
+ ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty),
+ ProjectionElem::Index(idx) => {
+ let idx = self.locals[idx]?;
+ ProjectionElem::Index(idx)
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
+ ProjectionElem::ConstantIndex { offset, min_length, from_end }
+ }
+ ProjectionElem::Subslice { from, to, from_end } => {
+ ProjectionElem::Subslice { from, to, from_end }
+ }
+ ProjectionElem::Downcast(name, index) => ProjectionElem::Downcast(name, index),
+ ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(ty),
+ ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(ty),
+ };
+ value = self.insert(Value::Projection(value, proj));
+ }
+
+ if let Some(local) = self.try_as_local(value, location)
+ && local != place.local // in case we had no projection to begin with.
+ {
+ *place = local.into();
+ self.reused_locals.insert(local);
+ self.any_replacement = true;
+ } else if place_ref.local != place.local
+ || place_ref.projection.len() < place.projection.len()
+ {
+ // By the invariant on `place_ref`.
+ *place = place_ref.project_deeper(&[], self.tcx);
+ self.reused_locals.insert(place_ref.local);
+ self.any_replacement = true;
+ }
+
+ Some(value)
+ }
+
+ #[instrument(level = "trace", skip(self), ret)]
+ fn simplify_operand(
+ &mut self,
+ operand: &mut Operand<'tcx>,
+ location: Location,
+ ) -> Option<VnIndex> {
+ match *operand {
+ Operand::Constant(ref constant) => Some(self.insert(Value::Constant(constant.const_))),
+ Operand::Copy(ref mut place) | Operand::Move(ref mut place) => {
+ let value = self.simplify_place_value(place, location)?;
+ if let Some(const_) = self.try_as_constant(value) {
+ *operand = Operand::Constant(Box::new(const_));
+ self.any_replacement = true;
+ }
+ Some(value)
+ }
+ }
+ }
+
+ #[instrument(level = "trace", skip(self), ret)]
+ fn simplify_rvalue(
+ &mut self,
+ rvalue: &mut Rvalue<'tcx>,
+ location: Location,
+ ) -> Option<VnIndex> {
+ let value = match *rvalue {
+ // Forward values.
+ Rvalue::Use(ref mut operand) => return self.simplify_operand(operand, location),
+ Rvalue::CopyForDeref(place) => {
+ let mut operand = Operand::Copy(place);
+ let val = self.simplify_operand(&mut operand, location);
+ *rvalue = Rvalue::Use(operand);
+ return val;
+ }
+
+ // Roots.
+ Rvalue::Repeat(ref mut op, amount) => {
+ let op = self.simplify_operand(op, location)?;
+ Value::Repeat(op, amount)
+ }
+ Rvalue::NullaryOp(op, ty) => Value::NullaryOp(op, ty),
+ Rvalue::Aggregate(box ref kind, ref mut fields) => {
+ let variant_index = match *kind {
+ AggregateKind::Array(..)
+ | AggregateKind::Tuple
+ | AggregateKind::Closure(..)
+ | AggregateKind::Generator(..) => FIRST_VARIANT,
+ AggregateKind::Adt(_, variant_index, _, _, None) => variant_index,
+ // Do not track unions.
+ AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
+ };
+ let fields: Option<Vec<_>> = fields
+ .iter_mut()
+ .map(|op| self.simplify_operand(op, location).or_else(|| self.new_opaque()))
+ .collect();
+ let ty = rvalue.ty(self.local_decls, self.tcx);
+ Value::Aggregate(ty, variant_index, fields?)
+ }
+ Rvalue::Ref(.., place) | Rvalue::AddressOf(_, place) => return self.new_pointer(place),
+
+ // Operations.
+ Rvalue::Len(ref mut place) => {
+ let place = self.simplify_place_value(place, location)?;
+ Value::Len(place)
+ }
+ Rvalue::Cast(kind, ref mut value, to) => {
+ let from = value.ty(self.local_decls, self.tcx);
+ let value = self.simplify_operand(value, location)?;
+ Value::Cast { kind, value, from, to }
+ }
+ Rvalue::BinaryOp(op, box (ref mut lhs, ref mut rhs)) => {
+ let lhs = self.simplify_operand(lhs, location);
+ let rhs = self.simplify_operand(rhs, location);
+ Value::BinaryOp(op, lhs?, rhs?)
+ }
+ Rvalue::CheckedBinaryOp(op, box (ref mut lhs, ref mut rhs)) => {
+ let lhs = self.simplify_operand(lhs, location);
+ let rhs = self.simplify_operand(rhs, location);
+ Value::CheckedBinaryOp(op, lhs?, rhs?)
+ }
+ Rvalue::UnaryOp(op, ref mut arg) => {
+ let arg = self.simplify_operand(arg, location)?;
+ Value::UnaryOp(op, arg)
+ }
+ Rvalue::Discriminant(ref mut place) => {
+ let place = self.simplify_place_value(place, location)?;
+ Value::Discriminant(place)
+ }
+
+ // Unsupported values.
+ Rvalue::ThreadLocalRef(..) | Rvalue::ShallowInitBox(..) => return None,
+ };
+ debug!(?value);
+ Some(self.insert(value))
+ }
+}
+
+impl<'tcx> VnState<'_, 'tcx> {
+ /// If `index` is a `Value::Constant`, return the `ConstOperand` to be put in the MIR.
+ fn try_as_constant(&mut self, index: VnIndex) -> Option<ConstOperand<'tcx>> {
+ if let Value::Constant(const_) = *self.get(index) {
+ // Some constants may contain pointers. We need to preserve the provenance of these
+ // pointers, but not all constants guarantee this:
+ // - valtrees purposefully do not;
+ // - ConstValue::Slice does not either.
+ match const_ {
+ Const::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => match valtree {
+ // This is just an integer, keep it.
+ ty::ValTree::Leaf(_) => {}
+ ty::ValTree::Branch(_) => return None,
+ },
+ ty::ConstKind::Param(..)
+ | ty::ConstKind::Unevaluated(..)
+ | ty::ConstKind::Expr(..) => {}
+ // Should not appear in runtime MIR.
+ ty::ConstKind::Infer(..)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(..)
+ | ty::ConstKind::Error(..) => bug!(),
+ },
+ Const::Unevaluated(..) => {}
+ // If the same slice appears twice in the MIR, we cannot guarantee that we will
+ // give the same `AllocId` to the data.
+ Const::Val(ConstValue::Slice { .. }, _) => return None,
+ Const::Val(
+ ConstValue::ZeroSized | ConstValue::Scalar(_) | ConstValue::Indirect { .. },
+ _,
+ ) => {}
+ }
+ Some(ConstOperand { span: rustc_span::DUMMY_SP, user_ty: None, const_ })
+ } else {
+ None
+ }
+ }
+
+ /// If there is a local which is assigned `index`, and its assignment strictly dominates `loc`,
+ /// return it.
+ fn try_as_local(&mut self, index: VnIndex, loc: Location) -> Option<Local> {
+ let other = self.rev_locals.get(&index)?;
+ other
+ .iter()
+ .copied()
+ .find(|&other| self.ssa.assignment_dominates(self.dominators, other, loc))
+ }
+}
+
+impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
+ self.simplify_operand(operand, location);
+ }
+
+ fn visit_statement(&mut self, stmt: &mut Statement<'tcx>, location: Location) {
+ self.super_statement(stmt, location);
+ if let StatementKind::Assign(box (_, ref mut rvalue)) = stmt.kind
+ // Do not try to simplify a constant; it's already in canonical shape.
+ && !matches!(rvalue, Rvalue::Use(Operand::Constant(_)))
+ && let Some(value) = self.simplify_rvalue(rvalue, location)
+ {
+ if let Some(const_) = self.try_as_constant(value) {
+ *rvalue = Rvalue::Use(Operand::Constant(Box::new(const_)));
+ self.any_replacement = true;
+ } else if let Some(local) = self.try_as_local(value, location)
+ && *rvalue != Rvalue::Use(Operand::Move(local.into()))
+ {
+ *rvalue = Rvalue::Use(Operand::Copy(local.into()));
+ self.reused_locals.insert(local);
+ self.any_replacement = true;
+ }
+ }
+ }
+}
+
+struct StorageRemover<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ reused_locals: BitSet<Local>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for StorageRemover<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_operand(&mut self, operand: &mut Operand<'tcx>, _: Location) {
+ if let Operand::Move(place) = *operand
+ && let Some(local) = place.as_local()
+ && self.reused_locals.contains(local)
+ {
+ *operand = Operand::Copy(place);
+ }
+ }
+
+ fn visit_statement(&mut self, stmt: &mut Statement<'tcx>, loc: Location) {
+ match stmt.kind {
+ // When removing storage statements, we need to remove both (#107511).
+ StatementKind::StorageLive(l) | StatementKind::StorageDead(l)
+ if self.reused_locals.contains(l) =>
+ {
+ stmt.make_nop()
+ }
+ _ => self.super_statement(stmt, loc),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index fc9e18378..b53e0852c 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -193,7 +193,7 @@ impl<'tcx> Inliner<'tcx> {
return Err("optimization fuel exhausted");
}
- let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions(
+ let Ok(callee_body) = callsite.callee.try_instantiate_mir_and_normalize_erasing_regions(
self.tcx,
self.param_env,
ty::EarlyBinder::bind(callee_body.clone()),
@@ -218,7 +218,13 @@ impl<'tcx> Inliner<'tcx> {
// Normally, this shouldn't be required, but trait normalization failure can create a
// validation ICE.
let output_type = callee_body.return_ty();
- if !util::is_subtype(self.tcx, self.param_env, output_type, destination_ty) {
+ if !util::relate_types(
+ self.tcx,
+ self.param_env,
+ ty::Variance::Covariant,
+ output_type,
+ destination_ty,
+ ) {
trace!(?output_type, ?destination_ty);
return Err("failed to normalize return type");
}
@@ -248,7 +254,13 @@ impl<'tcx> Inliner<'tcx> {
self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
{
let input_type = callee_body.local_decls[input].ty;
- if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
+ if !util::relate_types(
+ self.tcx,
+ self.param_env,
+ ty::Variance::Covariant,
+ input_type,
+ arg_ty,
+ ) {
trace!(?arg_ty, ?input_type);
return Err("failed to normalize tuple argument type");
}
@@ -257,7 +269,13 @@ impl<'tcx> Inliner<'tcx> {
for (arg, input) in args.iter().zip(callee_body.args_iter()) {
let input_type = callee_body.local_decls[input].ty;
let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
- if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
+ if !util::relate_types(
+ self.tcx,
+ self.param_env,
+ ty::Variance::Covariant,
+ input_type,
+ arg_ty,
+ ) {
trace!(?arg_ty, ?input_type);
return Err("failed to normalize argument type");
}
@@ -388,14 +406,16 @@ impl<'tcx> Inliner<'tcx> {
return Err("never inline hint");
}
- // Only inline local functions if they would be eligible for cross-crate
- // inlining. This is to ensure that the final crate doesn't have MIR that
- // reference unexported symbols
- if callsite.callee.def_id().is_local() {
- let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
- if !is_generic && !callee_attrs.requests_inline() {
- return Err("not exported");
- }
+ // The reachability pass defines which functions are eligible for inlining. Generally, inlining
+ // other functions is incorrect because they could reference symbols that aren't exported.
+ let is_generic = callsite
+ .callee
+ .args
+ .non_erasable_generics(self.tcx, callsite.callee.def_id())
+ .next()
+ .is_some();
+ if !is_generic && !callee_attrs.requests_inline() {
+ return Err("not exported");
}
if callsite.fn_sig.c_variadic() {
@@ -479,9 +499,10 @@ impl<'tcx> Inliner<'tcx> {
work_list.push(target);
// If the place doesn't actually need dropping, treat it like a regular goto.
- let ty = callsite
- .callee
- .subst_mir(self.tcx, ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty));
+ let ty = callsite.callee.instantiate_mir(
+ self.tcx,
+ ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty),
+ );
if ty.needs_drop(tcx, self.param_env) && let UnwindAction::Cleanup(unwind) = unwind {
work_list.push(unwind);
}
@@ -648,13 +669,13 @@ impl<'tcx> Inliner<'tcx> {
// Copy only unevaluated constants from the callee_body into the caller_body.
// Although we are only pushing `ConstKind::Unevaluated` consts to
// `required_consts`, here we may not only have `ConstKind::Unevaluated`
- // because we are calling `subst_and_normalize_erasing_regions`.
+ // because we are calling `instantiate_and_normalize_erasing_regions`.
caller_body.required_consts.extend(
- callee_body.required_consts.iter().copied().filter(|&ct| match ct.literal {
- ConstantKind::Ty(_) => {
+ callee_body.required_consts.iter().copied().filter(|&ct| match ct.const_ {
+ Const::Ty(_) => {
bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
}
- ConstantKind::Val(..) | ConstantKind::Unevaluated(..) => true,
+ Const::Val(..) | Const::Unevaluated(..) => true,
}),
);
}
@@ -809,9 +830,10 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
match terminator.kind {
TerminatorKind::Drop { ref place, unwind, .. } => {
// If the place doesn't actually need dropping, treat it like a regular goto.
- let ty = self
- .instance
- .subst_mir(tcx, ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty));
+ let ty = self.instance.instantiate_mir(
+ tcx,
+ ty::EarlyBinder::bind(&place.ty(self.callee_body, tcx).ty),
+ );
if ty.needs_drop(tcx, self.param_env) {
self.cost += CALL_PENALTY;
if let UnwindAction::Cleanup(_) = unwind {
@@ -822,7 +844,8 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
}
}
TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
- let fn_ty = self.instance.subst_mir(tcx, ty::EarlyBinder::bind(&f.literal.ty()));
+ let fn_ty =
+ self.instance.instantiate_mir(tcx, ty::EarlyBinder::bind(&f.const_.ty()));
self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
// Don't give intrinsics the extra penalty for calls
INSTR_COST
@@ -839,7 +862,7 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
self.cost += LANDINGPAD_PENALTY;
}
}
- TerminatorKind::Resume => self.cost += RESUME_PENALTY,
+ TerminatorKind::UnwindResume => self.cost += RESUME_PENALTY,
TerminatorKind::InlineAsm { unwind, .. } => {
self.cost += INSTR_COST;
if let UnwindAction::Cleanup(_) = unwind {
@@ -906,12 +929,12 @@ impl Integrator<'_, '_> {
UnwindAction::Cleanup(_) | UnwindAction::Continue => {
bug!("cleanup on cleanup block");
}
- UnwindAction::Unreachable | UnwindAction::Terminate => return unwind,
+ UnwindAction::Unreachable | UnwindAction::Terminate(_) => return unwind,
}
}
match unwind {
- UnwindAction::Unreachable | UnwindAction::Terminate => unwind,
+ UnwindAction::Unreachable | UnwindAction::Terminate(_) => unwind,
UnwindAction::Cleanup(target) => UnwindAction::Cleanup(self.map_block(target)),
// Add an unwind edge to the original call's cleanup block
UnwindAction::Continue => self.cleanup_block,
@@ -1017,15 +1040,15 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
TerminatorKind::Unreachable
}
}
- TerminatorKind::Resume => {
+ TerminatorKind::UnwindResume => {
terminator.kind = match self.cleanup_block {
UnwindAction::Cleanup(tgt) => TerminatorKind::Goto { target: tgt },
- UnwindAction::Continue => TerminatorKind::Resume,
+ UnwindAction::Continue => TerminatorKind::UnwindResume,
UnwindAction::Unreachable => TerminatorKind::Unreachable,
- UnwindAction::Terminate => TerminatorKind::Terminate,
+ UnwindAction::Terminate(reason) => TerminatorKind::UnwindTerminate(reason),
};
}
- TerminatorKind::Terminate => {}
+ TerminatorKind::UnwindTerminate(_) => {}
TerminatorKind::Unreachable => {}
TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
*real_target = self.map_block(*real_target);
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 822634129..d30e0bad8 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -44,7 +44,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
) -> bool {
trace!(%caller);
for &(callee, args) in tcx.mir_inliner_callees(caller.def) {
- let Ok(args) = caller.try_subst_mir_and_normalize_erasing_regions(
+ let Ok(args) = caller.try_instantiate_mir_and_normalize_erasing_regions(
tcx,
param_env,
ty::EarlyBinder::bind(args),
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index 8b0a0903d..a6ef2e11a 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -104,7 +104,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
fn try_eval_bool(&self, a: &Operand<'_>) -> Option<bool> {
let a = a.constant()?;
- if a.literal.ty().is_bool() { a.literal.try_to_bool() } else { None }
+ if a.const_.ty().is_bool() { a.const_.try_to_bool() } else { None }
}
/// Transform "&(*a)" ==> "a".
@@ -136,8 +136,8 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
return;
}
- let literal = ConstantKind::from_const(len, self.tcx);
- let constant = Constant { span: source_info.span, literal, user_ty: None };
+ let const_ = Const::from_ty_const(len, self.tcx);
+ let constant = ConstOperand { span: source_info.span, const_, user_ty: None };
*rvalue = Rvalue::Use(Operand::Constant(Box::new(constant)));
}
}
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 19108dabd..886ff7604 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -54,11 +54,8 @@ impl EnumSizeOpt {
let layout = tcx.layout_of(param_env.and(ty)).ok()?;
let variants = match &layout.variants {
Variants::Single { .. } => return None,
- Variants::Multiple { tag_encoding, .. }
- if matches!(tag_encoding, TagEncoding::Niche { .. }) =>
- {
- return None;
- }
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => return None,
+
Variants::Multiple { variants, .. } if variants.len() <= 1 => return None,
Variants::Multiple { variants, .. } => variants,
};
@@ -114,7 +111,7 @@ impl EnumSizeOpt {
tcx.data_layout.ptr_sized_integer().align(&tcx.data_layout).abi,
Mutability::Not,
);
- let alloc = tcx.create_memory_alloc(tcx.mk_const_alloc(alloc));
+ let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc));
Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc)))
}
fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
@@ -139,7 +136,6 @@ impl EnumSizeOpt {
let (adt_def, num_variants, alloc_id) =
self.candidate(tcx, param_env, ty, &mut alloc_cache)?;
- let alloc = tcx.global_alloc(alloc_id).unwrap_memory();
let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64);
@@ -150,11 +146,11 @@ impl EnumSizeOpt {
};
let place = Place::from(size_array_local);
- let constant_vals = Constant {
+ let constant_vals = ConstOperand {
span,
user_ty: None,
- literal: ConstantKind::Val(
- interpret::ConstValue::ByRef { alloc, offset: Size::ZERO },
+ const_: Const::Val(
+ ConstValue::Indirect { alloc_id, offset: Size::ZERO },
tmp_ty,
),
};
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index bf798adee..c0a09b7a7 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -2,6 +2,7 @@
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
#![feature(box_patterns)]
+#![feature(decl_macro)]
#![feature(is_sorted)]
#![feature(let_chains)]
#![feature(map_try_insert)]
@@ -30,9 +31,9 @@ use rustc_hir::intravisit::{self, Visitor};
use rustc_index::IndexVec;
use rustc_middle::mir::visit::Visitor as _;
use rustc_middle::mir::{
- traversal, AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstQualifs, Constant, LocalDecl,
- MirPass, MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue, SourceInfo,
- Statement, StatementKind, TerminatorKind, START_BLOCK,
+ traversal, AnalysisPhase, Body, CallSource, ClearCrossCrate, ConstOperand, ConstQualifs,
+ LocalDecl, MirPass, MirPhase, Operand, Place, ProjectionElem, Promoted, RuntimePhase, Rvalue,
+ SourceInfo, Statement, StatementKind, TerminatorKind, START_BLOCK,
};
use rustc_middle::query::Providers;
use rustc_middle::ty::{self, TyCtxt, TypeVisitableExt};
@@ -53,6 +54,7 @@ mod check_packed_ref;
pub mod check_unsafety;
mod remove_place_mention;
// This pass is public to allow external drivers to perform MIR cleanup
+mod add_subtyping_projections;
pub mod cleanup_post_borrowck;
mod const_debuginfo;
mod const_goto;
@@ -75,6 +77,7 @@ mod errors;
mod ffi_unwind_calls;
mod function_item_references;
mod generator;
+mod gvn;
pub mod inline;
mod instsimplify;
mod large_enums;
@@ -148,14 +151,14 @@ fn remap_mir_for_const_eval_select<'tcx>(
let terminator = bb.terminator.as_mut().expect("invalid terminator");
match terminator.kind {
TerminatorKind::Call {
- func: Operand::Constant(box Constant { ref literal, .. }),
+ func: Operand::Constant(box ConstOperand { ref const_, .. }),
ref mut args,
destination,
target,
unwind,
fn_span,
..
- } if let ty::FnDef(def_id, _) = *literal.ty().kind()
+ } if let ty::FnDef(def_id, _) = *const_.ty().kind()
&& tcx.item_name(def_id) == sym::const_eval_select
&& tcx.is_intrinsic(def_id) =>
{
@@ -342,7 +345,7 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
let body = match tcx.hir().body_const_context(def) {
// consts and statics do not have `optimized_mir`, so we can steal the body instead of
// cloning it.
- Some(hir::ConstContext::Const | hir::ConstContext::Static(_)) => body.steal(),
+ Some(hir::ConstContext::Const { .. } | hir::ConstContext::Static(_)) => body.steal(),
Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
};
@@ -357,9 +360,7 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
- if tcx.sess.opts.unstable_opts.drop_tracking_mir
- && let DefKind::Generator = tcx.def_kind(def)
- {
+ if let DefKind::Generator = tcx.def_kind(def) {
tcx.ensure_with_value().mir_generator_witnesses(def);
}
let mir_borrowck = tcx.mir_borrowck(def);
@@ -480,6 +481,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let passes: &[&dyn MirPass<'tcx>] = &[
// These next passes must be executed together
&add_call_guards::CriticalCallEdges,
+ &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too.
+ &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types
&elaborate_drops::ElaborateDrops,
// This will remove extraneous landing pads which are no longer
 // necessary, as well as forcing any call in a non-unwinding
@@ -526,7 +529,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
body,
&[
&check_alignment::CheckAlignment,
- &reveal_all::RevealAll, // has to be done before inlining, since inlined code is in RevealAll mode.
&lower_slice_len::LowerSliceLenCalls, // has to be done before inlining, otherwise actual call will be almost always inlined. Also simple, so can just do first
&unreachable_prop::UnreachablePropagation,
&uninhabited_enum_branching::UninhabitedEnumBranching,
@@ -550,6 +552,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// latter pass will leverage the created opportunities.
&separate_const_switch::SeparateConstSwitch,
&const_prop::ConstProp,
+ &gvn::GVN,
&dataflow_const_prop::DataflowConstProp,
//
// Const-prop runs unconditionally, but doesn't mutate the MIR at mir-opt-level=0.
@@ -605,6 +608,11 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::NotConst);
debug!("body: {:#?}", body);
+
+ if body.tainted_by_errors.is_some() {
+ return body;
+ }
+
run_optimization_passes(tcx, &mut body);
body
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index fc36c6e41..0d2d764c4 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -1,11 +1,10 @@
//! Lowers intrinsic calls
-use crate::{errors, MirPass};
+use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::Span;
use rustc_target::abi::{FieldIdx, VariantIdx};
pub struct LowerIntrinsics;
@@ -33,10 +32,10 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
source_info: terminator.source_info,
kind: StatementKind::Assign(Box::new((
*destination,
- Rvalue::Use(Operand::Constant(Box::new(Constant {
+ Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: terminator.source_info.span,
user_ty: None,
- literal: ConstantKind::zero_sized(tcx.types.unit),
+ const_: Const::zero_sized(tcx.types.unit),
}))),
))),
});
@@ -176,23 +175,22 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
} else {
span_bug!(terminator.source_info.span, "Only passing a local is supported");
};
+ // Add a new statement at the end of the block that does the read, and patch
+ // up the terminator.
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::Use(Operand::Copy(derefed_place)),
+ ))),
+ });
terminator.kind = match *target {
None => {
// No target means this read something uninhabited,
- // so it must be unreachable, and we don't need to
- // preserve the assignment either.
+ // so it must be unreachable.
TerminatorKind::Unreachable
}
- Some(target) => {
- block.statements.push(Statement {
- source_info: terminator.source_info,
- kind: StatementKind::Assign(Box::new((
- *destination,
- Rvalue::Use(Operand::Copy(derefed_place)),
- ))),
- });
- TerminatorKind::Goto { target }
- }
+ Some(target) => TerminatorKind::Goto { target },
}
}
sym::write_via_move => {
@@ -305,9 +303,6 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
terminator.kind = TerminatorKind::Unreachable;
}
}
- sym::simd_shuffle => {
- validate_simd_shuffle(tcx, args, terminator.source_info.span);
- }
_ => {}
}
}
@@ -326,9 +321,3 @@ fn resolve_rust_intrinsic<'tcx>(
}
None
}
-
-fn validate_simd_shuffle<'tcx>(tcx: TyCtxt<'tcx>, args: &[Operand<'tcx>], span: Span) {
- if !matches!(args[2], Operand::Constant(_)) {
- tcx.sess.emit_err(errors::SimdShuffleLastConst { span });
- }
-}
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index bc29fb8de..3dc627b61 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -98,10 +98,10 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
) if lhs_f == lhs_s
- && f_c.literal.ty().is_bool()
- && s_c.literal.ty().is_bool()
- && f_c.literal.try_eval_bool(tcx, param_env).is_some()
- && s_c.literal.try_eval_bool(tcx, param_env).is_some() => {}
+ && f_c.const_.ty().is_bool()
+ && s_c.const_.ty().is_bool()
+ && f_c.const_.try_eval_bool(tcx, param_env).is_some()
+ && s_c.const_.try_eval_bool(tcx, param_env).is_some() => {}
// Otherwise we cannot optimize. Try another block.
_ => continue 'outer,
@@ -128,8 +128,8 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
StatementKind::Assign(box (_, Rvalue::Use(Operand::Constant(s_c)))),
) => {
// From earlier loop we know that we are dealing with bool constants only:
- let f_b = f_c.literal.try_eval_bool(tcx, param_env).unwrap();
- let s_b = s_c.literal.try_eval_bool(tcx, param_env).unwrap();
+ let f_b = f_c.const_.try_eval_bool(tcx, param_env).unwrap();
+ let s_b = s_c.const_.try_eval_bool(tcx, param_env).unwrap();
if f_b == s_b {
// Same value in both blocks. Use statement as is.
(*f).clone()
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index 6c3b7c58f..d1a4b26a0 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -90,10 +90,10 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
&& let [PlaceElem::Deref] = &place.projection[..]
&& let Some(len) = self.slice_lengths[place.local]
{
- *rvalue = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ *rvalue = Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
span: rustc_span::DUMMY_SP,
user_ty: None,
- literal: ConstantKind::from_const(len, self.tcx),
+ const_: Const::from_ty_const(len, self.tcx),
})));
}
self.super_rvalue(rvalue, loc);
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 057f5fe82..5abb2f3d0 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -94,6 +94,8 @@ fn run_passes_inner<'tcx>(
let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes;
trace!(?overridden_passes);
+ let prof_arg = tcx.sess.prof.enabled().then(|| format!("{:?}", body.source.def_id()));
+
if !body.should_skip() {
for pass in passes {
let name = pass.name();
@@ -121,7 +123,14 @@ fn run_passes_inner<'tcx>(
validate_body(tcx, body, format!("before pass {name}"));
}
- tcx.sess.time(name, || pass.run_pass(tcx, body));
+ if let Some(prof_arg) = &prof_arg {
+ tcx.sess
+ .prof
+ .generic_activity_with_arg(pass.profiler_name(), &**prof_arg)
+ .run(|| pass.run_pass(tcx, body));
+ } else {
+ pass.run_pass(tcx, body);
+ }
if dump_enabled {
dump_mir_for_pass(tcx, body, &name, true);
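For context, a standalone sketch of the pattern this hunk adds around `pass.run_pass`: the per-body argument is formatted once, and only when self-profiling is enabled, so the common non-profiling path pays no formatting cost. The `Profiler` type and its `activity` method below are invented stand-ins, not rustc's real `SelfProfilerRef::generic_activity_with_arg` API.

```rust
// Invented stand-in for a self-profiler; the real rustc API differs.
struct Profiler {
    enabled: bool,
}

impl Profiler {
    fn activity(&self, name: &str, arg: &str, f: impl FnOnce()) {
        // A real profiler would record a timed event; here we just print.
        println!("start {name} ({arg})");
        f();
        println!("end {name} ({arg})");
    }
}

fn run_pass(name: &str) {
    println!("running {name}");
}

fn run_passes(prof: &Profiler, def_id: u32, passes: &[&str]) {
    // Format the label once per body, and only if profiling is enabled.
    let prof_arg = prof.enabled.then(|| format!("DefId({def_id})"));
    for pass in passes {
        if let Some(arg) = &prof_arg {
            prof.activity(pass, arg, || run_pass(pass));
        } else {
            run_pass(pass);
        }
    }
}

fn main() {
    run_passes(&Profiler { enabled: true }, 7, &["GVN", "SimplifyCfg"]);
}
```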
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index 49a940b57..67941cf43 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -108,7 +108,7 @@ enum Value<'tcx> {
}
/// For each local, save the place corresponding to `*local`.
-#[instrument(level = "trace", skip(tcx, body))]
+#[instrument(level = "trace", skip(tcx, body, ssa))]
fn compute_replacement<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 4e85c76fb..8c48a6677 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -63,7 +63,7 @@ impl RemoveNoopLandingPads {
let terminator = body[bb].terminator();
match terminator.kind {
TerminatorKind::Goto { .. }
- | TerminatorKind::Resume
+ | TerminatorKind::UnwindResume
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. } => {
@@ -72,7 +72,7 @@ impl RemoveNoopLandingPads {
TerminatorKind::GeneratorDrop
| TerminatorKind::Yield { .. }
| TerminatorKind::Return
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Unreachable
| TerminatorKind::Call { .. }
| TerminatorKind::Assert { .. }
@@ -88,7 +88,7 @@ impl RemoveNoopLandingPads {
let has_resume = body
.basic_blocks
.iter_enumerated()
- .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::Resume));
+ .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume));
if !has_resume {
debug!("remove_noop_landing_pads: no resume block in MIR");
return;
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index 9c6c55b08..a34d4b027 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -1,7 +1,6 @@
//! Removes operations on ZST places, and convert ZST operands to constants.
use crate::MirPass;
-use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -63,12 +62,12 @@ impl<'tcx> Replacer<'_, 'tcx> {
layout.is_zst()
}
- fn make_zst(&self, ty: Ty<'tcx>) -> Constant<'tcx> {
+ fn make_zst(&self, ty: Ty<'tcx>) -> ConstOperand<'tcx> {
debug_assert!(self.known_to_be_zst(ty));
- Constant {
+ ConstOperand {
span: rustc_span::DUMMY_SP,
user_ty: None,
- literal: ConstantKind::Val(ConstValue::ZeroSized, ty),
+ const_: Const::Val(ConstValue::ZeroSized, ty),
}
}
}
@@ -87,11 +86,6 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
var_debug_info.value = VarDebugInfoContents::Const(self.make_zst(place_ty))
}
}
- VarDebugInfoContents::Composite { ty, fragments: _ } => {
- if self.known_to_be_zst(ty) {
- var_debug_info.value = VarDebugInfoContents::Const(self.make_zst(ty))
- }
- }
}
}
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
index 243cb4635..abde6a47e 100644
--- a/compiler/rustc_mir_transform/src/required_consts.rs
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -1,27 +1,27 @@
use rustc_middle::mir::visit::Visitor;
-use rustc_middle::mir::{Constant, ConstantKind, Location};
+use rustc_middle::mir::{Const, ConstOperand, Location};
use rustc_middle::ty::ConstKind;
pub struct RequiredConstsVisitor<'a, 'tcx> {
- required_consts: &'a mut Vec<Constant<'tcx>>,
+ required_consts: &'a mut Vec<ConstOperand<'tcx>>,
}
impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
- pub fn new(required_consts: &'a mut Vec<Constant<'tcx>>) -> Self {
+ pub fn new(required_consts: &'a mut Vec<ConstOperand<'tcx>>) -> Self {
RequiredConstsVisitor { required_consts }
}
}
impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
- fn visit_constant(&mut self, constant: &Constant<'tcx>, _: Location) {
- let literal = constant.literal;
- match literal {
- ConstantKind::Ty(c) => match c.kind() {
+ fn visit_constant(&mut self, constant: &ConstOperand<'tcx>, _: Location) {
+ let const_ = constant.const_;
+ match const_ {
+ Const::Ty(c) => match c.kind() {
ConstKind::Param(_) | ConstKind::Error(_) | ConstKind::Value(_) => {}
_ => bug!("only ConstKind::Param/Value should be encountered here, got {:#?}", c),
},
- ConstantKind::Unevaluated(..) => self.required_consts.push(*constant),
- ConstantKind::Val(..) => {}
+ Const::Unevaluated(..) => self.required_consts.push(*constant),
+ Const::Val(..) => {}
}
}
}
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 23442f8b9..1626cf3c0 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -8,16 +8,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
pub struct RevealAll;
impl<'tcx> MirPass<'tcx> for RevealAll {
- fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
- sess.mir_opt_level() >= 3 || super::inline::Inline.is_enabled(sess)
- }
-
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- // Do not apply this transformation to generators.
- if body.generator.is_some() {
- return;
- }
-
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
RevealAllVisitor { tcx, param_env }.visit_body_preserves_cfg(body);
}
@@ -35,13 +26,38 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
}
#[inline]
- fn visit_constant(&mut self, constant: &mut Constant<'tcx>, _: Location) {
+ fn visit_place(
+ &mut self,
+ place: &mut Place<'tcx>,
+ _context: PlaceContext,
+ _location: Location,
+ ) {
+ // Performance optimization: don't reintern if there is no `OpaqueCast` to remove.
+ if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) {
+ return;
+ }
+ // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed.
+ // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these
+ // projections anymore.
+ place.projection = self.tcx.mk_place_elems(
+ &place
+ .projection
+ .into_iter()
+ .filter(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_)))
+ .collect::<Vec<_>>(),
+ );
+ self.super_place(place, _context, _location);
+ }
+
+ #[inline]
+ fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, location: Location) {
// We have to use `try_normalize_erasing_regions` here, since it's
// possible that we visit impossible-to-satisfy where clauses here,
// see #91745
- if let Ok(c) = self.tcx.try_normalize_erasing_regions(self.param_env, constant.literal) {
- constant.literal = c;
+ if let Ok(c) = self.tcx.try_normalize_erasing_regions(self.param_env, constant.const_) {
+ constant.const_ = c;
}
+ self.super_constant(constant, location);
}
#[inline]
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
index 1d8e54cdc..e1e4acccc 100644
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -108,13 +108,13 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
}
// The following terminators are not allowed
- TerminatorKind::Resume
+ TerminatorKind::UnwindResume
| TerminatorKind::Drop { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::Assert { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::Yield { .. }
- | TerminatorKind::Terminate
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::InlineAsm { .. }
@@ -165,8 +165,8 @@ pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
});
}
- TerminatorKind::Resume
- | TerminatorKind::Terminate
+ TerminatorKind::UnwindResume
+ | TerminatorKind::UnwindTerminate(_)
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::GeneratorDrop
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index 223dc59c6..e9895d97d 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -99,7 +99,11 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
};
debug!("make_shim({:?}) = untransformed {:?}", instance, result);
- pm::run_passes(
+ // We don't validate MIR here because the shims may generate code that's
+ // only valid in a reveal-all param-env. However, the initial validation is
+ // done with the MirBuilt phase, which uses a user-facing param-env, and
+ // that causes validation errors when TAITs are involved.
+ pm::run_passes_no_validate(
tcx,
&mut result,
&[
@@ -493,10 +497,10 @@ impl<'tcx> CloneShimBuilder<'tcx> {
// `func == Clone::clone(&ty) -> ty`
let func_ty = Ty::new_fn_def(tcx, self.def_id, [ty]);
- let func = Operand::Constant(Box::new(Constant {
+ let func = Operand::Constant(Box::new(ConstOperand {
span: self.span,
user_ty: None,
- literal: ConstantKind::zero_sized(func_ty),
+ const_: Const::zero_sized(func_ty),
}));
let ref_loc = self.make_place(
@@ -566,10 +570,10 @@ impl<'tcx> CloneShimBuilder<'tcx> {
TerminatorKind::Drop {
place: dest_field,
target: unwind,
- unwind: UnwindAction::Terminate,
+ unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
replace: false,
},
- true,
+ /* is_cleanup */ true,
);
unwind = next_unwind;
}
@@ -583,7 +587,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
I: IntoIterator<Item = Ty<'tcx>>,
{
self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
- let unwind = self.block(vec![], TerminatorKind::Resume, true);
+ let unwind = self.block(vec![], TerminatorKind::UnwindResume, true);
let target = self.block(vec![], TerminatorKind::Return, false);
let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, tys);
@@ -597,7 +601,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
args: GeneratorArgs<'tcx>,
) {
self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
- let unwind = self.block(vec![], TerminatorKind::Resume, true);
+ let unwind = self.block(vec![], TerminatorKind::UnwindResume, true);
// This will get overwritten with a switch once we know the target blocks
let switch = self.block(vec![], TerminatorKind::Unreachable, false);
let unwind = self.clone_fields(dest, src, switch, unwind, args.upvar_tys());
@@ -760,10 +764,10 @@ fn build_call_shim<'tcx>(
CallKind::Direct(def_id) => {
let ty = tcx.type_of(def_id).instantiate_identity();
(
- Operand::Constant(Box::new(Constant {
+ Operand::Constant(Box::new(ConstOperand {
span,
user_ty: None,
- literal: ConstantKind::zero_sized(ty),
+ const_: Const::zero_sized(ty),
})),
rcvr.into_iter().collect::<Vec<_>>(),
)
@@ -847,14 +851,14 @@ fn build_call_shim<'tcx>(
TerminatorKind::Drop {
place: rcvr_place(),
target: BasicBlock::new(4),
- unwind: UnwindAction::Terminate,
+ unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
replace: false,
},
- true,
+ /* is_cleanup */ true,
);
// BB #4 - resume
- block(&mut blocks, vec![], TerminatorKind::Resume, true);
+ block(&mut blocks, vec![], TerminatorKind::UnwindResume, true);
}
let mut body =
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index b7a51cfd6..2795cf157 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -29,6 +29,7 @@
use crate::MirPass;
use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_index::bit_set::BitSet;
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
@@ -345,24 +346,22 @@ pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let basic_blocks = body.basic_blocks.as_mut();
let source_scopes = &body.source_scopes;
- let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
- let mut used_blocks = 0;
- for alive_index in reachable.iter() {
- let alive_index = alive_index.index();
- replacements[alive_index] = BasicBlock::new(used_blocks);
- if alive_index != used_blocks {
- // Swap the next alive block data with the current available slot. Since
- // alive_index is non-decreasing this is a valid operation.
- basic_blocks.raw.swap(alive_index, used_blocks);
- }
- used_blocks += 1;
- }
-
if tcx.sess.instrument_coverage() {
- save_unreachable_coverage(basic_blocks, source_scopes, used_blocks);
+ save_unreachable_coverage(basic_blocks, source_scopes, &reachable);
}
- basic_blocks.raw.truncate(used_blocks);
+ let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
+ let mut orig_index = 0;
+ let mut used_index = 0;
+ basic_blocks.raw.retain(|_| {
+ let keep = reachable.contains(BasicBlock::new(orig_index));
+ if keep {
+ replacements[orig_index] = BasicBlock::new(used_index);
+ used_index += 1;
+ }
+ orig_index += 1;
+ keep
+ });
for block in basic_blocks {
for target in block.terminator_mut().successors_mut() {
@@ -404,11 +403,12 @@ pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
fn save_unreachable_coverage(
basic_blocks: &mut IndexSlice<BasicBlock, BasicBlockData<'_>>,
source_scopes: &IndexSlice<SourceScope, SourceScopeData<'_>>,
- first_dead_block: usize,
+ reachable: &BitSet<BasicBlock>,
) {
// Identify instances that still have some live coverage counters left.
let mut live = FxHashSet::default();
- for basic_block in &basic_blocks.raw[0..first_dead_block] {
+ for bb in reachable.iter() {
+ let basic_block = &basic_blocks[bb];
for statement in &basic_block.statements {
let StatementKind::Coverage(coverage) = &statement.kind else { continue };
let CoverageKind::Counter { .. } = coverage.kind else { continue };
@@ -417,7 +417,8 @@ fn save_unreachable_coverage(
}
}
- for block in &mut basic_blocks.raw[..first_dead_block] {
+ for bb in reachable.iter() {
+ let block = &mut basic_blocks[bb];
for statement in &mut block.statements {
let StatementKind::Coverage(_) = &statement.kind else { continue };
let instance = statement.source_info.scope.inlined_instance(source_scopes);
@@ -433,7 +434,11 @@ fn save_unreachable_coverage(
// Retain coverage for instances that still have some live counters left.
let mut retained_coverage = Vec::new();
- for dead_block in &basic_blocks.raw[first_dead_block..] {
+ for dead_block in basic_blocks.indices() {
+ if reachable.contains(dead_block) {
+ continue;
+ }
+ let dead_block = &basic_blocks[dead_block];
for statement in &dead_block.statements {
let StatementKind::Coverage(coverage) = &statement.kind else { continue };
let Some(code_region) = &coverage.code_region else { continue };
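As a standalone illustration of the compaction scheme `remove_dead_blocks` uses above (plain indices instead of MIR basic blocks; the `compact` helper is hypothetical): unreachable entries are dropped in place with `retain` while a replacement table records where each surviving entry moved, which the caller then uses to rewrite references.

```rust
// Keep only entries marked reachable; return an old-index -> new-index table
// (entries for dropped indices are left at their initial value).
fn compact(blocks: &mut Vec<String>, reachable: &[bool]) -> Vec<usize> {
    let mut replacements: Vec<usize> = (0..blocks.len()).collect();
    let mut orig_index = 0;
    let mut used_index = 0;
    blocks.retain(|_| {
        let keep = reachable[orig_index];
        if keep {
            replacements[orig_index] = used_index;
            used_index += 1;
        }
        orig_index += 1;
        keep
    });
    replacements
}

fn main() {
    let mut blocks = vec!["bb0".into(), "bb1".into(), "bb2".into(), "bb3".into()];
    let reachable = [true, false, true, true];
    let replacements = compact(&mut blocks, &reachable);
    assert_eq!(blocks, ["bb0", "bb2", "bb3"]);
    // Old index 2 now maps to 1, old index 3 to 2; dropped index 1 is unchanged.
    assert_eq!(replacements, [0, 1, 1, 2]);
}
```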
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
index 1ff488169..b508cd1c9 100644
--- a/compiler/rustc_mir_transform/src/simplify_branches.rs
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -23,7 +23,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyConstCondition {
TerminatorKind::SwitchInt {
discr: Operand::Constant(ref c), ref targets, ..
} => {
- let constant = c.literal.try_eval_bits(tcx, param_env, c.ty());
+ let constant = c.const_.try_eval_bits(tcx, param_env);
if let Some(constant) = constant {
let target = targets.target_for_value(constant);
TerminatorKind::Goto { target }
@@ -33,7 +33,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyConstCondition {
}
TerminatorKind::Assert {
target, cond: Operand::Constant(ref c), expected, ..
- } => match c.literal.try_eval_bool(tcx, param_env) {
+ } => match c.const_.try_eval_bool(tcx, param_env) {
Some(v) if v == expected => TerminatorKind::Goto { target },
_ => continue,
},
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index 113ca2fc5..1a8cfc411 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -206,12 +206,12 @@ fn find_branch_value_info<'tcx>(
match (left, right) {
(Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
| (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
- let branch_value_ty = branch_value.literal.ty();
+ let branch_value_ty = branch_value.const_.ty();
// we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
return None;
};
- let branch_value_scalar = branch_value.literal.try_to_scalar()?;
+ let branch_value_scalar = branch_value.const_.try_to_scalar()?;
Some((branch_value_scalar, branch_value_ty, *to_switch_on))
}
_ => None,
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index e66ae8ff8..c21b1724c 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -1,4 +1,5 @@
use crate::MirPass;
+use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
use rustc_index::bit_set::{BitSet, GrowableBitSet};
use rustc_index::IndexVec;
use rustc_middle::mir::patch::MirPatch;
@@ -147,7 +148,7 @@ fn escaping_locals<'tcx>(
}
// We ignore anything that happens in debuginfo, since we expand it using
- // `VarDebugInfoContents::Composite`.
+ // `VarDebugInfoFragment`.
fn visit_var_debug_info(&mut self, _: &VarDebugInfo<'tcx>) {}
}
}
@@ -246,9 +247,7 @@ fn replace_flattened_locals<'tcx>(
for (index, annotation) in body.user_type_annotations.iter_enumerated_mut() {
visitor.visit_user_type_annotation(index, annotation);
}
- for var_debug_info in &mut body.var_debug_info {
- visitor.visit_var_debug_info(var_debug_info);
- }
+ visitor.expand_var_debug_info(&mut body.var_debug_info);
let ReplacementVisitor { patch, all_dead_locals, .. } = visitor;
patch.apply(body);
all_dead_locals
@@ -256,7 +255,7 @@ fn replace_flattened_locals<'tcx>(
struct ReplacementVisitor<'tcx, 'll> {
tcx: TyCtxt<'tcx>,
- /// This is only used to compute the type for `VarDebugInfoContents::Composite`.
+ /// This is only used to compute the type for `VarDebugInfoFragment`.
local_decls: &'ll LocalDecls<'tcx>,
/// Work to do.
replacements: &'ll ReplacementMap<'tcx>,
@@ -266,16 +265,38 @@ struct ReplacementVisitor<'tcx, 'll> {
}
impl<'tcx> ReplacementVisitor<'tcx, '_> {
- fn gather_debug_info_fragments(&self, local: Local) -> Option<Vec<VarDebugInfoFragment<'tcx>>> {
- let mut fragments = Vec::new();
- let parts = self.replacements.place_fragments(local.into())?;
- for (field, ty, replacement_local) in parts {
- fragments.push(VarDebugInfoFragment {
- projection: vec![PlaceElem::Field(field, ty)],
- contents: Place::from(replacement_local),
- });
- }
- Some(fragments)
+ #[instrument(level = "trace", skip(self))]
+ fn expand_var_debug_info(&mut self, var_debug_info: &mut Vec<VarDebugInfo<'tcx>>) {
+ var_debug_info.flat_map_in_place(|mut var_debug_info| {
+ let place = match var_debug_info.value {
+ VarDebugInfoContents::Const(_) => return vec![var_debug_info],
+ VarDebugInfoContents::Place(ref mut place) => place,
+ };
+
+ if let Some(repl) = self.replacements.replace_place(self.tcx, place.as_ref()) {
+ *place = repl;
+ return vec![var_debug_info];
+ }
+
+ let Some(parts) = self.replacements.place_fragments(*place) else {
+ return vec![var_debug_info];
+ };
+
+ let ty = place.ty(self.local_decls, self.tcx).ty;
+
+ parts
+ .map(|(field, field_ty, replacement_local)| {
+ let mut var_debug_info = var_debug_info.clone();
+ let composite = var_debug_info.composite.get_or_insert_with(|| {
+ Box::new(VarDebugInfoFragment { ty, projection: Vec::new() })
+ });
+ composite.projection.push(PlaceElem::Field(field, field_ty));
+
+ var_debug_info.value = VarDebugInfoContents::Place(replacement_local.into());
+ var_debug_info
+ })
+ .collect()
+ });
}
}
@@ -422,48 +443,6 @@ impl<'tcx, 'll> MutVisitor<'tcx> for ReplacementVisitor<'tcx, 'll> {
self.super_statement(statement, location)
}
- #[instrument(level = "trace", skip(self))]
- fn visit_var_debug_info(&mut self, var_debug_info: &mut VarDebugInfo<'tcx>) {
- match &mut var_debug_info.value {
- VarDebugInfoContents::Place(ref mut place) => {
- if let Some(repl) = self.replacements.replace_place(self.tcx, place.as_ref()) {
- *place = repl;
- } else if let Some(local) = place.as_local()
- && let Some(fragments) = self.gather_debug_info_fragments(local)
- {
- let ty = place.ty(self.local_decls, self.tcx).ty;
- var_debug_info.value = VarDebugInfoContents::Composite { ty, fragments };
- }
- }
- VarDebugInfoContents::Composite { ty: _, ref mut fragments } => {
- let mut new_fragments = Vec::new();
- debug!(?fragments);
- fragments.retain_mut(|fragment| {
- if let Some(repl) =
- self.replacements.replace_place(self.tcx, fragment.contents.as_ref())
- {
- fragment.contents = repl;
- true
- } else if let Some(local) = fragment.contents.as_local()
- && let Some(frg) = self.gather_debug_info_fragments(local)
- {
- new_fragments.extend(frg.into_iter().map(|mut f| {
- f.projection.splice(0..0, fragment.projection.iter().copied());
- f
- }));
- false
- } else {
- true
- }
- });
- debug!(?fragments);
- debug!(?new_fragments);
- fragments.extend(new_fragments);
- }
- VarDebugInfoContents::Const(_) => {}
- }
- }
-
fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
assert!(!self.all_dead_locals.contains(*local));
}
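
The `sroa.rs` hunks above replace the old `visit_var_debug_info` rewriting with `expand_var_debug_info`, which maps each debug-info entry to zero or more replacement entries via `FlatMapInPlace`. A rough stand-alone sketch of that expansion pattern, using an owned `Vec` and `flat_map` instead of the in-place trait (the `DebugEntry` type and `fragments_of` callback are invented for illustration):

    #[derive(Clone, Debug, PartialEq)]
    struct DebugEntry {
        name: String,
        local: usize,
    }

    /// Expand each entry into one clone per fragment of its local, keeping
    /// entries whose local was not split, much like the SROA debug-info rewrite.
    fn expand_entries(
        entries: Vec<DebugEntry>,
        fragments_of: impl Fn(usize) -> Option<Vec<usize>>,
    ) -> Vec<DebugEntry> {
        entries
            .into_iter()
            .flat_map(|entry| match fragments_of(entry.local) {
                // Local was not split: keep the entry unchanged.
                None => vec![entry],
                // Local was split: emit one retargeted clone per new local.
                Some(parts) => parts
                    .into_iter()
                    .map(|local| DebugEntry { name: entry.name.clone(), local })
                    .collect(),
            })
            .collect()
    }

    fn main() {
        let entries = vec![DebugEntry { name: "x".into(), local: 0 }];
        // Pretend local 0 was split into replacement locals 7 and 8.
        let out = expand_entries(entries, |l| (l == 0).then(|| vec![7, 8]));
        assert_eq!(out.len(), 2);
        assert_eq!(out[1], DebugEntry { name: "x".into(), local: 8 });
    }
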
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 04bc461c8..43fc1b7b9 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -13,7 +13,6 @@ use rustc_middle::middle::resolve_bound_vars::Set1;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
-#[derive(Debug)]
pub struct SsaLocals {
/// Assignments to each local. This defines whether the local is SSA.
assignments: IndexVec<Local, Set1<LocationExtended>>,
@@ -79,14 +78,10 @@ impl SsaLocals {
visitor.assignments[local] = Set1::One(LocationExtended::Arg);
}
- if body.basic_blocks.len() > 2 {
- for (bb, data) in traversal::reverse_postorder(body) {
- visitor.visit_basic_block_data(bb, data);
- }
- } else {
- for (bb, data) in body.basic_blocks.iter_enumerated() {
- visitor.visit_basic_block_data(bb, data);
- }
+ // For SSA assignments, a RPO visit will see the assignment before it sees any use.
+ // We only visit reachable nodes: computing `dominates` on an unreachable node ICEs.
+ for (bb, data) in traversal::reverse_postorder(body) {
+ visitor.visit_basic_block_data(bb, data);
}
for var_debug_info in &body.var_debug_info {
@@ -129,6 +124,25 @@ impl SsaLocals {
self.direct_uses[local]
}
+ pub fn assignment_dominates(
+ &self,
+ dominators: &Dominators<BasicBlock>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ match self.assignments[local] {
+ Set1::One(LocationExtended::Arg) => true,
+ Set1::One(LocationExtended::Plain(ass)) => {
+ if ass.block == location.block {
+ ass.statement_index < location.statement_index
+ } else {
+ dominators.dominates(ass.block, location.block)
+ }
+ }
+ _ => false,
+ }
+ }
+
pub fn assignments<'a, 'tcx>(
&'a self,
body: &'a Body<'tcx>,
@@ -146,6 +160,24 @@ impl SsaLocals {
})
}
+ pub fn for_each_assignment_mut<'tcx>(
+ &self,
+ basic_blocks: &mut BasicBlocks<'tcx>,
+ mut f: impl FnMut(Local, &mut Rvalue<'tcx>, Location),
+ ) {
+ for &local in &self.assignment_order {
+ if let Set1::One(LocationExtended::Plain(loc)) = self.assignments[local] {
+ // `loc` must point to a direct assignment to `local`.
+ let bbs = basic_blocks.as_mut_preserves_cfg();
+ let bb = &mut bbs[loc.block];
+ let stmt = &mut bb.statements[loc.statement_index];
+ let StatementKind::Assign(box (target, ref mut rvalue)) = stmt.kind else { bug!() };
+ assert_eq!(target.as_local(), Some(local));
+ f(local, rvalue, loc)
+ }
+ }
+ }
+
/// Compute the equivalence classes for locals, based on copy statements.
///
/// The returned vector maps each local to the one it copies. In the following case:
@@ -215,7 +247,7 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
// so we have to remove them too.
PlaceContext::NonMutatingUse(
NonMutatingUseContext::SharedBorrow
- | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::FakeBorrow
| NonMutatingUseContext::AddressOf,
)
| PlaceContext::MutatingUse(_) => {
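
The new `SsaLocals::assignment_dominates` helper added above decides whether a use site is dominated by the local's single assignment: within one block it compares statement indices, across blocks it defers to a dominator query, and arguments always dominate. A self-contained sketch of that decision (the multiple-assignment case, which the real method answers with `false`, is omitted; the dominator relation is passed in as a closure and all names are illustrative):

    #[derive(Clone, Copy, Debug)]
    struct Location {
        block: usize,
        statement_index: usize,
    }

    /// `None` models the "assigned as a function argument" case, which
    /// dominates every use; `Some` is a single plain assignment.
    fn assignment_dominates(
        assignment: Option<Location>,
        use_loc: Location,
        dominates: impl Fn(usize, usize) -> bool,
    ) -> bool {
        match assignment {
            None => true,
            Some(ass) if ass.block == use_loc.block => {
                // Same block: the assignment must come strictly earlier.
                ass.statement_index < use_loc.statement_index
            }
            Some(ass) => dominates(ass.block, use_loc.block),
        }
    }

    fn main() {
        // Toy dominator relation: block 0 dominates every block.
        let dom = |a: usize, _b: usize| a == 0;
        let def = Some(Location { block: 0, statement_index: 1 });
        assert!(assignment_dominates(def, Location { block: 3, statement_index: 0 }, dom));
        // A statement never dominates itself within the same block.
        let same = Some(Location { block: 2, statement_index: 5 });
        assert!(!assignment_dominates(same, Location { block: 2, statement_index: 5 }, dom));
    }
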
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index bd1724bf8..0b9311a20 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -13,7 +13,11 @@ pub struct UnreachablePropagation;
impl MirPass<'_> for UnreachablePropagation {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
// Enable only under -Zmir-opt-level=2 as this can make programs less debuggable.
- sess.mir_opt_level() >= 2
+
+ // FIXME(#116171) Coverage gets confused by MIR passes that can remove all
+ // coverage statements from an instrumented function. This pass can be
+ // re-enabled when coverage codegen is robust against that happening.
+ sess.mir_opt_level() >= 2 && !sess.instrument_coverage()
}
fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
diff --git a/compiler/rustc_monomorphize/messages.ftl b/compiler/rustc_monomorphize/messages.ftl
index fdd47e6f7..2b7d9bd34 100644
--- a/compiler/rustc_monomorphize/messages.ftl
+++ b/compiler/rustc_monomorphize/messages.ftl
@@ -14,6 +14,10 @@ monomorphize_large_assignments =
.label = value moved from here
.note = The current maximum size is {$limit}, but it can be customized with the move_size_limit attribute: `#![move_size_limit = "..."]`
+monomorphize_no_optimized_mir =
+ missing optimized MIR for an item in the crate `{$crate_name}`
+ .note = missing optimized MIR for this item (was the crate `{$crate_name}` compiled with `--emit=metadata`?)
+
monomorphize_recursion_limit =
reached the recursion limit while instantiating `{$shrunk}`
.note = `{$def_path_str}` defined here
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index 55b14ce1c..1a9f0e835 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -170,8 +170,7 @@ use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};
use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::interpret::{AllocId, ConstValue};
-use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
+use rustc_middle::mir::interpret::{AllocId, ErrorHandled, GlobalAlloc, Scalar};
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::mir::visit::Visitor as MirVisitor;
use rustc_middle::mir::{self, Local, Location};
@@ -179,8 +178,8 @@ use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCoercion};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{
- self, GenericParamDefKind, Instance, InstanceDef, Ty, TyCtxt, TypeFoldable, TypeVisitableExt,
- VtblEntry,
+ self, AssocKind, GenericParamDefKind, Instance, InstanceDef, Ty, TyCtxt, TypeFoldable,
+ TypeVisitableExt, VtblEntry,
};
use rustc_middle::ty::{GenericArgKind, GenericArgs};
use rustc_middle::{middle::codegen_fn_attrs::CodegenFnAttrFlags, mir::visit::TyContext};
@@ -188,11 +187,13 @@ use rustc_session::config::EntryFnType;
use rustc_session::lint::builtin::LARGE_ASSIGNMENTS;
use rustc_session::Limit;
use rustc_span::source_map::{dummy_spanned, respan, Span, Spanned, DUMMY_SP};
+use rustc_span::symbol::{sym, Ident};
use rustc_target::abi::Size;
use std::path::PathBuf;
use crate::errors::{
- EncounteredErrorWhileInstantiating, LargeAssignmentsLint, RecursionLimit, TypeLengthLimit,
+ EncounteredErrorWhileInstantiating, LargeAssignmentsLint, NoOptimizedMir, RecursionLimit,
+ TypeLengthLimit,
};
#[derive(PartialEq)]
@@ -431,7 +432,7 @@ fn collect_items_rec<'tcx>(
hir::InlineAsmOperand::SymFn { anon_const } => {
let fn_ty =
tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
- visit_fn_use(tcx, fn_ty, false, *op_sp, &mut used_items);
+ visit_fn_use(tcx, fn_ty, false, *op_sp, &mut used_items, &[]);
}
hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
let instance = Instance::mono(tcx, *def_id);
@@ -457,7 +458,7 @@ fn collect_items_rec<'tcx>(
// Check for PMEs and emit a diagnostic if one happened. To try to show relevant edges of the
// mono item graph.
if tcx.sess.diagnostic().err_count() > error_count
- && starting_item.node.is_generic_fn()
+ && starting_item.node.is_generic_fn(tcx)
&& starting_item.node.is_user_defined()
{
let formatted_item = with_no_trimmed_paths!(starting_item.node.to_string());
@@ -590,6 +591,13 @@ struct MirUsedCollector<'a, 'tcx> {
body: &'a mir::Body<'tcx>,
output: &'a mut MonoItems<'tcx>,
instance: Instance<'tcx>,
+ /// Spans for move size lints already emitted. Helps avoid duplicate lints.
+ move_size_spans: Vec<Span>,
+ /// If true, we should temporarily skip move size checks, because we are
+ /// processing an operand to a `skip_move_check_fns` function call.
+ skip_move_size_check: bool,
+ /// Set of functions into which it is OK to move large data.
+ skip_move_check_fns: Vec<DefId>,
}
impl<'a, 'tcx> MirUsedCollector<'a, 'tcx> {
@@ -598,12 +606,51 @@ impl<'a, 'tcx> MirUsedCollector<'a, 'tcx> {
T: TypeFoldable<TyCtxt<'tcx>>,
{
debug!("monomorphize: self.instance={:?}", self.instance);
- self.instance.subst_mir_and_normalize_erasing_regions(
+ self.instance.instantiate_mir_and_normalize_erasing_regions(
self.tcx,
ty::ParamEnv::reveal_all(),
ty::EarlyBinder::bind(value),
)
}
+
+ fn check_move_size(&mut self, limit: usize, operand: &mir::Operand<'tcx>, location: Location) {
+ let limit = Size::from_bytes(limit);
+ let ty = operand.ty(self.body, self.tcx);
+ let ty = self.monomorphize(ty);
+ let Ok(layout) = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)) else { return };
+ if layout.size <= limit {
+ return;
+ }
+ debug!(?layout);
+ let source_info = self.body.source_info(location);
+ debug!(?source_info);
+ for span in &self.move_size_spans {
+ if span.overlaps(source_info.span) {
+ return;
+ }
+ }
+ let lint_root = source_info.scope.lint_root(&self.body.source_scopes);
+ debug!(?lint_root);
+ let Some(lint_root) = lint_root else {
+ // This happens when the issue is in a function from a foreign crate that
+ // we monomorphized in the current crate. We can't get a `HirId` for things
+ // in other crates.
+ // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root
+ // but correct span? This would make the lint at least accept crate-level lint attributes.
+ return;
+ };
+ self.tcx.emit_spanned_lint(
+ LARGE_ASSIGNMENTS,
+ lint_root,
+ source_info.span,
+ LargeAssignmentsLint {
+ span: source_info.span,
+ size: layout.size.bytes(),
+ limit: limit.bytes(),
+ },
+ );
+ self.move_size_spans.push(source_info.span);
+ }
}
impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
@@ -649,7 +696,14 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
) => {
let fn_ty = operand.ty(self.body, self.tcx);
let fn_ty = self.monomorphize(fn_ty);
- visit_fn_use(self.tcx, fn_ty, false, span, &mut self.output);
+ visit_fn_use(
+ self.tcx,
+ fn_ty,
+ false,
+ span,
+ &mut self.output,
+ &self.skip_move_check_fns,
+ );
}
mir::Rvalue::Cast(
mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)),
@@ -692,44 +746,20 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
/// to walk it would attempt to evaluate the `ty::Const` inside, which doesn't necessarily
/// work, as some constants cannot be represented in the type system.
#[instrument(skip(self), level = "debug")]
- fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) {
- let literal = self.monomorphize(constant.literal);
- let val = match literal {
- mir::ConstantKind::Val(val, _) => val,
- mir::ConstantKind::Ty(ct) => match ct.kind() {
- ty::ConstKind::Value(val) => self.tcx.valtree_to_const_val((ct.ty(), val)),
- ty::ConstKind::Unevaluated(ct) => {
- debug!(?ct);
- let param_env = ty::ParamEnv::reveal_all();
- match self.tcx.const_eval_resolve(param_env, ct.expand(), None) {
- // The `monomorphize` call should have evaluated that constant already.
- Ok(val) => val,
- Err(ErrorHandled::Reported(_)) => return,
- Err(ErrorHandled::TooGeneric) => span_bug!(
- self.body.source_info(location).span,
- "collection encountered polymorphic constant: {:?}",
- literal
- ),
- }
- }
- _ => return,
- },
- mir::ConstantKind::Unevaluated(uv, _) => {
- let param_env = ty::ParamEnv::reveal_all();
- match self.tcx.const_eval_resolve(param_env, uv, None) {
- // The `monomorphize` call should have evaluated that constant already.
- Ok(val) => val,
- Err(ErrorHandled::Reported(_)) => return,
- Err(ErrorHandled::TooGeneric) => span_bug!(
- self.body.source_info(location).span,
- "collection encountered polymorphic constant: {:?}",
- literal
- ),
- }
- }
+ fn visit_constant(&mut self, constant: &mir::ConstOperand<'tcx>, location: Location) {
+ let const_ = self.monomorphize(constant.const_);
+ let param_env = ty::ParamEnv::reveal_all();
+ let val = match const_.eval(self.tcx, param_env, None) {
+ Ok(v) => v,
+ Err(ErrorHandled::Reported(..)) => return,
+ Err(ErrorHandled::TooGeneric(..)) => span_bug!(
+ self.body.source_info(location).span,
+ "collection encountered polymorphic constant: {:?}",
+ const_
+ ),
};
collect_const_value(self.tcx, val, self.output);
- MirVisitor::visit_ty(self, literal.ty(), TyContext::Location(location));
+ MirVisitor::visit_ty(self, const_.ty(), TyContext::Location(location));
}
fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
@@ -737,11 +767,25 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
let source = self.body.source_info(location).span;
let tcx = self.tcx;
+ let push_mono_lang_item = |this: &mut Self, lang_item: LangItem| {
+ let instance = Instance::mono(tcx, tcx.require_lang_item(lang_item, Some(source)));
+ if should_codegen_locally(tcx, &instance) {
+ this.output.push(create_fn_mono_item(tcx, instance, source));
+ }
+ };
+
match terminator.kind {
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.body, tcx);
let callee_ty = self.monomorphize(callee_ty);
- visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output)
+ self.skip_move_size_check = visit_fn_use(
+ self.tcx,
+ callee_ty,
+ true,
+ source,
+ &mut self.output,
+ &self.skip_move_check_fns,
+ )
}
mir::TerminatorKind::Drop { ref place, .. } => {
let ty = place.ty(self.body, self.tcx).ty;
@@ -752,8 +796,8 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
for op in operands {
match *op {
mir::InlineAsmOperand::SymFn { ref value } => {
- let fn_ty = self.monomorphize(value.literal.ty());
- visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output);
+ let fn_ty = self.monomorphize(value.const_.ty());
+ visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output, &[]);
}
mir::InlineAsmOperand::SymStatic { def_id } => {
let instance = Instance::mono(self.tcx, def_id);
@@ -771,23 +815,14 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
mir::AssertKind::BoundsCheck { .. } => LangItem::PanicBoundsCheck,
_ => LangItem::Panic,
};
- let instance = Instance::mono(tcx, tcx.require_lang_item(lang_item, Some(source)));
- if should_codegen_locally(tcx, &instance) {
- self.output.push(create_fn_mono_item(tcx, instance, source));
- }
+ push_mono_lang_item(self, lang_item);
}
- mir::TerminatorKind::Terminate { .. } => {
- let instance = Instance::mono(
- tcx,
- tcx.require_lang_item(LangItem::PanicCannotUnwind, Some(source)),
- );
- if should_codegen_locally(tcx, &instance) {
- self.output.push(create_fn_mono_item(tcx, instance, source));
- }
+ mir::TerminatorKind::UnwindTerminate(reason) => {
+ push_mono_lang_item(self, reason.lang_item());
}
mir::TerminatorKind::Goto { .. }
| mir::TerminatorKind::SwitchInt { .. }
- | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::UnwindResume
| mir::TerminatorKind::Return
| mir::TerminatorKind::Unreachable => {}
mir::TerminatorKind::GeneratorDrop
@@ -796,55 +831,19 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
| mir::TerminatorKind::FalseUnwind { .. } => bug!(),
}
- if let Some(mir::UnwindAction::Terminate) = terminator.unwind() {
- let instance = Instance::mono(
- tcx,
- tcx.require_lang_item(LangItem::PanicCannotUnwind, Some(source)),
- );
- if should_codegen_locally(tcx, &instance) {
- self.output.push(create_fn_mono_item(tcx, instance, source));
- }
+ if let Some(mir::UnwindAction::Terminate(reason)) = terminator.unwind() {
+ push_mono_lang_item(self, reason.lang_item());
}
self.super_terminator(terminator, location);
+ self.skip_move_size_check = false;
}
fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
self.super_operand(operand, location);
- let limit = self.tcx.move_size_limit().0;
- if limit == 0 {
- return;
- }
- let limit = Size::from_bytes(limit);
- let ty = operand.ty(self.body, self.tcx);
- let ty = self.monomorphize(ty);
- let layout = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty));
- if let Ok(layout) = layout {
- if layout.size > limit {
- debug!(?layout);
- let source_info = self.body.source_info(location);
- debug!(?source_info);
- let lint_root = source_info.scope.lint_root(&self.body.source_scopes);
- debug!(?lint_root);
- let Some(lint_root) = lint_root else {
- // This happens when the issue is in a function from a foreign crate that
- // we monomorphized in the current crate. We can't get a `HirId` for things
- // in other crates.
- // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root
- // but correct span? This would make the lint at least accept crate-level lint attributes.
- return;
- };
- self.tcx.emit_spanned_lint(
- LARGE_ASSIGNMENTS,
- lint_root,
- source_info.span,
- LargeAssignmentsLint {
- span: source_info.span,
- size: layout.size.bytes(),
- limit: limit.bytes(),
- },
- )
- }
+ let move_size_limit = self.tcx.move_size_limit().0;
+ if move_size_limit > 0 && !self.skip_move_size_check {
+ self.check_move_size(move_size_limit, operand, location);
}
}
@@ -874,8 +873,11 @@ fn visit_fn_use<'tcx>(
is_direct_call: bool,
source: Span,
output: &mut MonoItems<'tcx>,
-) {
+ skip_move_check_fns: &[DefId],
+) -> bool {
+ let mut skip_move_size_check = false;
if let ty::FnDef(def_id, args) = *ty.kind() {
+ skip_move_size_check = skip_move_check_fns.contains(&def_id);
let instance = if is_direct_call {
ty::Instance::expect_resolve(tcx, ty::ParamEnv::reveal_all(), def_id, args)
} else {
@@ -886,6 +888,7 @@ fn visit_fn_use<'tcx>(
};
visit_instance_use(tcx, instance, is_direct_call, source, output);
}
+ skip_move_size_check
}
fn visit_instance_use<'tcx>(
@@ -958,7 +961,10 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) ->
}
if !tcx.is_mir_available(def_id) {
- bug!("no MIR available for {:?}", def_id);
+ tcx.sess.emit_fatal(NoOptimizedMir {
+ span: tcx.def_span(def_id),
+ crate_name: tcx.crate_name(def_id.krate),
+ });
}
true
@@ -1284,6 +1290,7 @@ fn create_mono_items_for_default_impls<'tcx>(
// it, to validate whether or not the impl is legal to instantiate at all.
let only_region_params = |param: &ty::GenericParamDef, _: &_| match param.kind {
GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ GenericParamDefKind::Const { is_host_effect: true, .. } => tcx.consts.true_.into(),
GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
unreachable!(
"`own_requires_monomorphization` check means that \
@@ -1363,6 +1370,31 @@ fn collect_alloc<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoIt
}
}
+fn add_assoc_fn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: Option<DefId>,
+ fn_ident: Ident,
+ skip_move_check_fns: &mut Vec<DefId>,
+) {
+ if let Some(def_id) = def_id.and_then(|def_id| assoc_fn_of_type(tcx, def_id, fn_ident)) {
+ skip_move_check_fns.push(def_id);
+ }
+}
+
+fn assoc_fn_of_type<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, fn_ident: Ident) -> Option<DefId> {
+ for impl_def_id in tcx.inherent_impls(def_id) {
+ if let Some(new) = tcx.associated_items(impl_def_id).find_by_name_and_kind(
+ tcx,
+ fn_ident,
+ AssocKind::Fn,
+ def_id,
+ ) {
+ return Some(new.def_id);
+ }
+ }
+ return None;
+}
+
/// Scans the MIR in order to find function calls, closures, and drop-glue.
#[instrument(skip(tcx, output), level = "debug")]
fn collect_used_items<'tcx>(
@@ -1371,19 +1403,54 @@ fn collect_used_items<'tcx>(
output: &mut MonoItems<'tcx>,
) {
let body = tcx.instance_mir(instance.def);
- MirUsedCollector { tcx, body: &body, output, instance }.visit_body(&body);
+
+ let mut skip_move_check_fns = vec![];
+ if tcx.move_size_limit().0 > 0 {
+ add_assoc_fn(
+ tcx,
+ tcx.lang_items().owned_box(),
+ Ident::from_str("new"),
+ &mut skip_move_check_fns,
+ );
+ add_assoc_fn(
+ tcx,
+ tcx.get_diagnostic_item(sym::Arc),
+ Ident::from_str("new"),
+ &mut skip_move_check_fns,
+ );
+ add_assoc_fn(
+ tcx,
+ tcx.get_diagnostic_item(sym::Rc),
+ Ident::from_str("new"),
+ &mut skip_move_check_fns,
+ );
+ }
+
+ MirUsedCollector {
+ tcx,
+ body: &body,
+ output,
+ instance,
+ move_size_spans: vec![],
+ skip_move_size_check: false,
+ skip_move_check_fns,
+ }
+ .visit_body(&body);
}
#[instrument(skip(tcx, output), level = "debug")]
fn collect_const_value<'tcx>(
tcx: TyCtxt<'tcx>,
- value: ConstValue<'tcx>,
+ value: mir::ConstValue<'tcx>,
output: &mut MonoItems<'tcx>,
) {
match value {
- ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_alloc(tcx, ptr.provenance, output),
- ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
- for &id in alloc.inner().provenance().ptrs().values() {
+ mir::ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => {
+ collect_alloc(tcx, ptr.provenance, output)
+ }
+ mir::ConstValue::Indirect { alloc_id, .. } => collect_alloc(tcx, alloc_id, output),
+ mir::ConstValue::Slice { data, meta: _ } => {
+ for &id in data.inner().provenance().ptrs().values() {
collect_alloc(tcx, id, output);
}
}
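
The collector changes above thread two new pieces of state through `MirUsedCollector`: a skip list of callee `DefId`s (the `new` functions of `Box`, `Arc` and `Rc`) whose arguments should not trigger the large-move lint, and a list of spans already linted so that overlapping operands are reported only once. A minimal sketch of the overlap-based de-duplication, with a toy span type rather than rustc's `Span` (names are illustrative only):

    #[derive(Clone, Copy, Debug)]
    struct Span {
        lo: u32,
        hi: u32,
    }

    impl Span {
        /// Half-open ranges overlap if each starts before the other ends.
        fn overlaps(self, other: Span) -> bool {
            self.lo < other.hi && other.lo < self.hi
        }
    }

    /// Report a span unless an overlapping span was already reported,
    /// mirroring how `check_move_size` consults `move_size_spans`.
    fn report_once(reported: &mut Vec<Span>, span: Span) -> bool {
        if reported.iter().any(|s| s.overlaps(span)) {
            return false; // already covered by an earlier lint
        }
        reported.push(span);
        true
    }

    fn main() {
        let mut reported = Vec::new();
        assert!(report_once(&mut reported, Span { lo: 10, hi: 20 }));
        // An overlapping operand in the same expression is suppressed.
        assert!(!report_once(&mut reported, Span { lo: 15, hi: 18 }));
        // A disjoint location is reported normally.
        assert!(report_once(&mut reported, Span { lo: 40, hi: 50 }));
    }
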
diff --git a/compiler/rustc_monomorphize/src/errors.rs b/compiler/rustc_monomorphize/src/errors.rs
index 495a73490..fdcc95f13 100644
--- a/compiler/rustc_monomorphize/src/errors.rs
+++ b/compiler/rustc_monomorphize/src/errors.rs
@@ -4,7 +4,7 @@ use crate::fluent_generated as fluent;
use rustc_errors::ErrorGuaranteed;
use rustc_errors::IntoDiagnostic;
use rustc_macros::{Diagnostic, LintDiagnostic};
-use rustc_span::Span;
+use rustc_span::{Span, Symbol};
#[derive(Diagnostic)]
#[diag(monomorphize_recursion_limit)]
@@ -33,6 +33,14 @@ pub struct TypeLengthLimit {
pub type_length: usize,
}
+#[derive(Diagnostic)]
+#[diag(monomorphize_no_optimized_mir)]
+pub struct NoOptimizedMir {
+ #[note]
+ pub span: Span,
+ pub crate_name: Symbol,
+}
+
pub struct UnusedGenericParamsHint {
pub span: Span,
pub param_spans: Vec<Span>,
diff --git a/compiler/rustc_monomorphize/src/partitioning.rs b/compiler/rustc_monomorphize/src/partitioning.rs
index de6db8ae6..1d8cbe0e2 100644
--- a/compiler/rustc_monomorphize/src/partitioning.rs
+++ b/compiler/rustc_monomorphize/src/partitioning.rs
@@ -221,7 +221,7 @@ where
}
let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item);
- let is_volatile = is_incremental_build && mono_item.is_generic_fn();
+ let is_volatile = is_incremental_build && mono_item.is_generic_fn(cx.tcx);
let cgu_name = match characteristic_def_id {
Some(def_id) => compute_codegen_unit_name(
@@ -647,7 +647,7 @@ fn characteristic_def_id_of_mono_item<'tcx>(
// parameters, but the self-type of their impl block do will fail to normalize.
if !tcx.sess.opts.unstable_opts.polymorphize || !instance.has_param() {
// This is a method within an impl, find out what the self-type is:
- let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
+ let impl_self_ty = tcx.instantiate_and_normalize_erasing_regions(
instance.args,
ty::ParamEnv::reveal_all(),
tcx.type_of(impl_def_id),
@@ -801,7 +801,7 @@ fn mono_item_visibility<'tcx>(
return Visibility::Hidden;
}
- let is_generic = instance.args.non_erasable_generics().next().is_some();
+ let is_generic = instance.args.non_erasable_generics(tcx, def_id).next().is_some();
// Upstream `DefId` instances get different handling than local ones.
let Some(def_id) = def_id.as_local() else {
diff --git a/compiler/rustc_monomorphize/src/polymorphize.rs b/compiler/rustc_monomorphize/src/polymorphize.rs
index a8b7a0dbb..6c206a6ba 100644
--- a/compiler/rustc_monomorphize/src/polymorphize.rs
+++ b/compiler/rustc_monomorphize/src/polymorphize.rs
@@ -9,13 +9,13 @@ use rustc_hir::{def::DefKind, def_id::DefId, ConstContext};
use rustc_middle::mir::{
self,
visit::{TyContext, Visitor},
- Constant, ConstantKind, Local, LocalDecl, Location,
+ Local, LocalDecl, Location,
};
use rustc_middle::query::Providers;
use rustc_middle::ty::{
self,
visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor},
- Const, GenericArgsRef, Ty, TyCtxt, UnusedGenericParams,
+ GenericArgsRef, Ty, TyCtxt, UnusedGenericParams,
};
use rustc_span::symbol::sym;
use std::ops::ControlFlow;
@@ -143,7 +143,7 @@ fn mark_used_by_default_parameters<'tcx>(
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::AssocTy
@@ -261,12 +261,12 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
self.super_local_decl(local, local_decl);
}
- fn visit_constant(&mut self, ct: &Constant<'tcx>, location: Location) {
- match ct.literal {
- ConstantKind::Ty(c) => {
+ fn visit_constant(&mut self, ct: &mir::ConstOperand<'tcx>, location: Location) {
+ match ct.const_ {
+ mir::Const::Ty(c) => {
c.visit_with(self);
}
- ConstantKind::Unevaluated(mir::UnevaluatedConst { def, args: _, promoted }, ty) => {
+ mir::Const::Unevaluated(mir::UnevaluatedConst { def, args: _, promoted }, ty) => {
// Avoid considering `T` unused when constants are of the form:
// `<Self as Foo<T>>::foo::promoted[p]`
if let Some(p) = promoted {
@@ -280,7 +280,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
Visitor::visit_ty(self, ty, TyContext::Location(location));
}
- ConstantKind::Val(_, ty) => Visitor::visit_ty(self, ty, TyContext::Location(location)),
+ mir::Const::Val(_, ty) => Visitor::visit_ty(self, ty, TyContext::Location(location)),
}
}
@@ -291,7 +291,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for MarkUsedGenericParams<'a, 'tcx> {
#[instrument(level = "debug", skip(self))]
- fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
if !c.has_non_region_param() {
return ControlFlow::Continue(());
}
diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs
index a3433d3d1..e25c5c9f2 100644
--- a/compiler/rustc_monomorphize/src/util.rs
+++ b/compiler/rustc_monomorphize/src/util.rs
@@ -26,12 +26,12 @@ pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: In
let ClosureSizeProfileData { before_feature_tys, after_feature_tys } =
typeck_results.closure_size_eval[&closure_def_id];
- let before_feature_tys = tcx.subst_and_normalize_erasing_regions(
+ let before_feature_tys = tcx.instantiate_and_normalize_erasing_regions(
closure_instance.args,
param_env,
ty::EarlyBinder::bind(before_feature_tys),
);
- let after_feature_tys = tcx.subst_and_normalize_erasing_regions(
+ let after_feature_tys = tcx.instantiate_and_normalize_erasing_regions(
closure_instance.args,
param_env,
ty::EarlyBinder::bind(after_feature_tys),
diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl
index 34cc0998c..05b6c4062 100644
--- a/compiler/rustc_parse/messages.ftl
+++ b/compiler/rustc_parse/messages.ftl
@@ -196,6 +196,9 @@ parse_expected_else_block = expected `{"{"}`, found {$first_tok}
.suggestion = add an `if` if this is the condition of a chained `else if` statement
parse_expected_expression_found_let = expected expression, found `let` statement
+ .note = only supported directly in conditions of `if` and `while` expressions
+ .not_supported_or = `||` operators are not supported in let chain expressions
+ .not_supported_parentheses = `let`s wrapped in parentheses are not supported in a context with let chains
parse_expected_fn_path_found_fn_keyword = expected identifier, found keyword `fn`
.suggestion = use `Fn` to refer to the trait
@@ -506,7 +509,7 @@ parse_maybe_fn_typo_with_impl = you might have meant to write `impl` instead of
parse_maybe_recover_from_bad_qpath_stage_2 =
missing angle brackets in associated item path
- .suggestion = try: `{$ty}`
+ .suggestion = types that don't start with an identifier need to be surrounded with angle brackets in qualified paths
parse_maybe_recover_from_bad_type_plus =
expected a path on the left-hand side of `+`, not `{$ty}`
diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs
index e0b1e3678..7c75e440a 100644
--- a/compiler/rustc_parse/src/errors.rs
+++ b/compiler/rustc_parse/src/errors.rs
@@ -10,7 +10,7 @@ use rustc_span::symbol::Ident;
use rustc_span::{Span, Symbol};
use crate::fluent_generated as fluent;
-use crate::parser::TokenDescription;
+use crate::parser::{ForbiddenLetReason, TokenDescription};
#[derive(Diagnostic)]
#[diag(parse_maybe_report_ambiguous_plus)]
@@ -59,9 +59,18 @@ pub(crate) enum BadTypePlusSub {
#[diag(parse_maybe_recover_from_bad_qpath_stage_2)]
pub(crate) struct BadQPathStage2 {
#[primary_span]
- #[suggestion(code = "", applicability = "maybe-incorrect")]
pub span: Span,
- pub ty: String,
+ #[subdiagnostic]
+ pub wrap: WrapType,
+}
+
+#[derive(Subdiagnostic)]
+#[multipart_suggestion(parse_suggestion, applicability = "machine-applicable")]
+pub(crate) struct WrapType {
+ #[suggestion_part(code = "<")]
+ pub lo: Span,
+ #[suggestion_part(code = ">")]
+ pub hi: Span,
}
#[derive(Diagnostic)]
@@ -392,9 +401,12 @@ pub(crate) struct IfExpressionMissingCondition {
#[derive(Diagnostic)]
#[diag(parse_expected_expression_found_let)]
+#[note]
pub(crate) struct ExpectedExpressionFoundLet {
#[primary_span]
pub span: Span,
+ #[subdiagnostic]
+ pub reason: ForbiddenLetReason,
}
#[derive(Diagnostic)]
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
index 892be36aa..c012a8663 100644
--- a/compiler/rustc_parse/src/lib.rs
+++ b/compiler/rustc_parse/src/lib.rs
@@ -8,7 +8,7 @@
#![feature(never_type)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate tracing;
@@ -132,7 +132,7 @@ fn maybe_source_file_to_parser(
sess: &ParseSess,
source_file: Lrc<SourceFile>,
) -> Result<Parser<'_>, Vec<Diagnostic>> {
- let end_pos = source_file.end_pos;
+ let end_pos = source_file.end_position();
let stream = maybe_file_to_stream(sess, source_file, None)?;
let mut parser = stream_to_parser(sess, stream, None);
if parser.token == token::Eof {
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
index 5d6c574ba..c4e8d9006 100644
--- a/compiler/rustc_parse/src/parser/attr_wrapper.rs
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -106,7 +106,7 @@ impl ToAttrTokenStream for LazyAttrTokenStreamImpl {
let mut cursor_snapshot = self.cursor_snapshot.clone();
let tokens =
std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
- .chain((0..self.num_calls).map(|_| {
+ .chain(std::iter::repeat_with(|| {
let token = cursor_snapshot.next();
(FlatToken::Token(token.0), token.1)
}))
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
index 6c8ef3406..06b1b1523 100644
--- a/compiler/rustc_parse/src/parser/diagnostics.rs
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -16,7 +16,7 @@ use crate::errors::{
StructLiteralBodyWithoutPath, StructLiteralBodyWithoutPathSugg, StructLiteralNeedingParens,
StructLiteralNeedingParensSugg, SuggAddMissingLetStmt, SuggEscapeIdentifier, SuggRemoveComma,
TernaryOperator, UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration,
- UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead,
+ UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead, WrapType,
};
use crate::fluent_generated as fluent;
@@ -1589,10 +1589,9 @@ impl<'a> Parser<'a> {
self.parse_path_segments(&mut path.segments, T::PATH_STYLE, None)?;
path.span = ty_span.to(self.prev_token.span);
- let ty_str = self.span_to_snippet(ty_span).unwrap_or_else(|_| pprust::ty_to_string(&ty));
self.sess.emit_err(BadQPathStage2 {
- span: path.span,
- ty: format!("<{}>::{}", ty_str, pprust::path_to_string(&path)),
+ span: ty_span,
+ wrap: WrapType { lo: ty_span.shrink_to_lo(), hi: ty_span.shrink_to_hi() },
});
let path_span = ty_span.shrink_to_hi(); // Use an empty path since `position == 0`.
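
The reworked `BadQPathStage2` above stops echoing a re-rendered path string and instead emits a machine-applicable multipart suggestion that wraps the offending self type in angle brackets. The language rule behind it is the one the new fluent message states: a type that does not start with an identifier must be surrounded with angle brackets in a qualified path. A small example in ordinary Rust (not compiler-internal code) of the form the suggestion produces:

    fn main() {
        // `[u8]::len(&bytes[..])` is the parse error this diagnostic fires on;
        // wrapping the slice type in angle brackets forms a valid qualified path.
        let bytes = [1u8, 2, 3];
        let len = <[u8]>::len(&bytes[..]);
        assert_eq!(len, 3);
    }
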
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
index 9ae3ef617..f4cee3a66 100644
--- a/compiler/rustc_parse/src/parser/expr.rs
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -8,6 +8,7 @@ use super::{
use crate::errors;
use crate::maybe_recover_from_interpolated_ty_qpath;
+use ast::mut_visit::{noop_visit_expr, MutVisitor};
use ast::{Path, PathSegment};
use core::mem;
use rustc_ast::ptr::P;
@@ -27,6 +28,7 @@ use rustc_errors::{
AddToDiagnostic, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic,
PResult, StashKey,
};
+use rustc_macros::Subdiagnostic;
use rustc_session::errors::{report_lit_error, ExprParenthesesNeeded};
use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP;
use rustc_session::lint::BuiltinLintDiagnostics;
@@ -122,8 +124,8 @@ impl<'a> Parser<'a> {
self.parse_expr().map(|value| AnonConst { id: DUMMY_NODE_ID, value })
}
- fn parse_expr_catch_underscore(&mut self) -> PResult<'a, P<Expr>> {
- match self.parse_expr() {
+ fn parse_expr_catch_underscore(&mut self, restrictions: Restrictions) -> PResult<'a, P<Expr>> {
+ match self.parse_expr_res(restrictions, None) {
Ok(expr) => Ok(expr),
Err(mut err) => match self.token.ident() {
Some((Ident { name: kw::Underscore, .. }, false))
@@ -141,7 +143,8 @@ impl<'a> Parser<'a> {
/// Parses a sequence of expressions delimited by parentheses.
fn parse_expr_paren_seq(&mut self) -> PResult<'a, ThinVec<P<Expr>>> {
- self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore()).map(|(r, _)| r)
+ self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore(Restrictions::empty()))
+ .map(|(r, _)| r)
}
/// Parses an expression, subject to the given restrictions.
@@ -1345,110 +1348,113 @@ impl<'a> Parser<'a> {
// Outer attributes are already parsed and will be
// added to the return value after the fact.
- // Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
- let lo = self.token.span;
- if let token::Literal(_) = self.token.kind {
- // This match arm is a special-case of the `_` match arm below and
- // could be removed without changing functionality, but it's faster
- // to have it here, especially for programs with large constants.
- self.parse_expr_lit()
- } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
- self.parse_expr_tuple_parens()
- } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
- self.parse_expr_block(None, lo, BlockCheckMode::Default)
- } else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) {
- self.parse_expr_closure().map_err(|mut err| {
- // If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }`
- // then suggest parens around the lhs.
- if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
- err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ let restrictions = self.restrictions;
+ self.with_res(restrictions - Restrictions::ALLOW_LET, |this| {
+ // Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
+ let lo = this.token.span;
+ if let token::Literal(_) = this.token.kind {
+ // This match arm is a special-case of the `_` match arm below and
+ // could be removed without changing functionality, but it's faster
+ // to have it here, especially for programs with large constants.
+ this.parse_expr_lit()
+ } else if this.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ this.parse_expr_tuple_parens(restrictions)
+ } else if this.check(&token::OpenDelim(Delimiter::Brace)) {
+ this.parse_expr_block(None, lo, BlockCheckMode::Default)
+ } else if this.check(&token::BinOp(token::Or)) || this.check(&token::OrOr) {
+ this.parse_expr_closure().map_err(|mut err| {
+ // If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }`
+ // then suggest parens around the lhs.
+ if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
+ err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp));
+ }
+ err
+ })
+ } else if this.check(&token::OpenDelim(Delimiter::Bracket)) {
+ this.parse_expr_array_or_repeat(Delimiter::Bracket)
+ } else if this.is_builtin() {
+ this.parse_expr_builtin()
+ } else if this.check_path() {
+ this.parse_expr_path_start()
+ } else if this.check_keyword(kw::Move)
+ || this.check_keyword(kw::Static)
+ || this.check_const_closure()
+ {
+ this.parse_expr_closure()
+ } else if this.eat_keyword(kw::If) {
+ this.parse_expr_if()
+ } else if this.check_keyword(kw::For) {
+ if this.choose_generics_over_qpath(1) {
+ this.parse_expr_closure()
+ } else {
+ assert!(this.eat_keyword(kw::For));
+ this.parse_expr_for(None, this.prev_token.span)
}
- err
- })
- } else if self.check(&token::OpenDelim(Delimiter::Bracket)) {
- self.parse_expr_array_or_repeat(Delimiter::Bracket)
- } else if self.is_builtin() {
- self.parse_expr_builtin()
- } else if self.check_path() {
- self.parse_expr_path_start()
- } else if self.check_keyword(kw::Move)
- || self.check_keyword(kw::Static)
- || self.check_const_closure()
- {
- self.parse_expr_closure()
- } else if self.eat_keyword(kw::If) {
- self.parse_expr_if()
- } else if self.check_keyword(kw::For) {
- if self.choose_generics_over_qpath(1) {
- self.parse_expr_closure()
- } else {
- assert!(self.eat_keyword(kw::For));
- self.parse_expr_for(None, self.prev_token.span)
- }
- } else if self.eat_keyword(kw::While) {
- self.parse_expr_while(None, self.prev_token.span)
- } else if let Some(label) = self.eat_label() {
- self.parse_expr_labeled(label, true)
- } else if self.eat_keyword(kw::Loop) {
- let sp = self.prev_token.span;
- self.parse_expr_loop(None, self.prev_token.span).map_err(|mut err| {
- err.span_label(sp, "while parsing this `loop` expression");
- err
- })
- } else if self.eat_keyword(kw::Match) {
- let match_sp = self.prev_token.span;
- self.parse_expr_match().map_err(|mut err| {
- err.span_label(match_sp, "while parsing this `match` expression");
- err
- })
- } else if self.eat_keyword(kw::Unsafe) {
- let sp = self.prev_token.span;
- self.parse_expr_block(None, lo, BlockCheckMode::Unsafe(ast::UserProvided)).map_err(
- |mut err| {
- err.span_label(sp, "while parsing this `unsafe` expression");
+ } else if this.eat_keyword(kw::While) {
+ this.parse_expr_while(None, this.prev_token.span)
+ } else if let Some(label) = this.eat_label() {
+ this.parse_expr_labeled(label, true)
+ } else if this.eat_keyword(kw::Loop) {
+ let sp = this.prev_token.span;
+ this.parse_expr_loop(None, this.prev_token.span).map_err(|mut err| {
+ err.span_label(sp, "while parsing this `loop` expression");
err
- },
- )
- } else if self.check_inline_const(0) {
- self.parse_const_block(lo.to(self.token.span), false)
- } else if self.may_recover() && self.is_do_catch_block() {
- self.recover_do_catch()
- } else if self.is_try_block() {
- self.expect_keyword(kw::Try)?;
- self.parse_try_block(lo)
- } else if self.eat_keyword(kw::Return) {
- self.parse_expr_return()
- } else if self.eat_keyword(kw::Continue) {
- self.parse_expr_continue(lo)
- } else if self.eat_keyword(kw::Break) {
- self.parse_expr_break()
- } else if self.eat_keyword(kw::Yield) {
- self.parse_expr_yield()
- } else if self.is_do_yeet() {
- self.parse_expr_yeet()
- } else if self.eat_keyword(kw::Become) {
- self.parse_expr_become()
- } else if self.check_keyword(kw::Let) {
- self.parse_expr_let()
- } else if self.eat_keyword(kw::Underscore) {
- Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore))
- } else if self.token.uninterpolated_span().at_least_rust_2018() {
- // `Span:.at_least_rust_2018()` is somewhat expensive; don't get it repeatedly.
- if self.check_keyword(kw::Async) {
- if self.is_async_block() {
- // Check for `async {` and `async move {`.
- self.parse_async_block()
+ })
+ } else if this.eat_keyword(kw::Match) {
+ let match_sp = this.prev_token.span;
+ this.parse_expr_match().map_err(|mut err| {
+ err.span_label(match_sp, "while parsing this `match` expression");
+ err
+ })
+ } else if this.eat_keyword(kw::Unsafe) {
+ let sp = this.prev_token.span;
+ this.parse_expr_block(None, lo, BlockCheckMode::Unsafe(ast::UserProvided)).map_err(
+ |mut err| {
+ err.span_label(sp, "while parsing this `unsafe` expression");
+ err
+ },
+ )
+ } else if this.check_inline_const(0) {
+ this.parse_const_block(lo.to(this.token.span), false)
+ } else if this.may_recover() && this.is_do_catch_block() {
+ this.recover_do_catch()
+ } else if this.is_try_block() {
+ this.expect_keyword(kw::Try)?;
+ this.parse_try_block(lo)
+ } else if this.eat_keyword(kw::Return) {
+ this.parse_expr_return()
+ } else if this.eat_keyword(kw::Continue) {
+ this.parse_expr_continue(lo)
+ } else if this.eat_keyword(kw::Break) {
+ this.parse_expr_break()
+ } else if this.eat_keyword(kw::Yield) {
+ this.parse_expr_yield()
+ } else if this.is_do_yeet() {
+ this.parse_expr_yeet()
+ } else if this.eat_keyword(kw::Become) {
+ this.parse_expr_become()
+ } else if this.check_keyword(kw::Let) {
+ this.parse_expr_let(restrictions)
+ } else if this.eat_keyword(kw::Underscore) {
+ Ok(this.mk_expr(this.prev_token.span, ExprKind::Underscore))
+ } else if this.token.uninterpolated_span().at_least_rust_2018() {
+ // `Span:.at_least_rust_2018()` is somewhat expensive; don't get it repeatedly.
+ if this.check_keyword(kw::Async) {
+ if this.is_async_block() {
+ // Check for `async {` and `async move {`.
+ this.parse_async_block()
+ } else {
+ this.parse_expr_closure()
+ }
+ } else if this.eat_keyword(kw::Await) {
+ this.recover_incorrect_await_syntax(lo, this.prev_token.span)
} else {
- self.parse_expr_closure()
+ this.parse_expr_lit()
}
- } else if self.eat_keyword(kw::Await) {
- self.recover_incorrect_await_syntax(lo, self.prev_token.span)
} else {
- self.parse_expr_lit()
+ this.parse_expr_lit()
}
- } else {
- self.parse_expr_lit()
- }
+ })
}
fn parse_expr_lit(&mut self) -> PResult<'a, P<Expr>> {
@@ -1462,13 +1468,13 @@ impl<'a> Parser<'a> {
}
}
- fn parse_expr_tuple_parens(&mut self) -> PResult<'a, P<Expr>> {
+ fn parse_expr_tuple_parens(&mut self, restrictions: Restrictions) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
let (es, trailing_comma) = match self.parse_seq_to_end(
&token::CloseDelim(Delimiter::Parenthesis),
SeqSep::trailing_allowed(token::Comma),
- |p| p.parse_expr_catch_underscore(),
+ |p| p.parse_expr_catch_underscore(restrictions.intersection(Restrictions::ALLOW_LET)),
) {
Ok(x) => x,
Err(err) => {
@@ -2231,7 +2237,8 @@ impl<'a> Parser<'a> {
let decl_hi = self.prev_token.span;
let mut body = match fn_decl.output {
FnRetTy::Default(_) => {
- let restrictions = self.restrictions - Restrictions::STMT_EXPR;
+ let restrictions =
+ self.restrictions - Restrictions::STMT_EXPR - Restrictions::ALLOW_LET;
self.parse_expr_res(restrictions, None)?
}
_ => {
@@ -2436,10 +2443,12 @@ impl<'a> Parser<'a> {
/// Parses the condition of a `if` or `while` expression.
fn parse_expr_cond(&mut self) -> PResult<'a, P<Expr>> {
- let cond =
+ let mut cond =
self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL | Restrictions::ALLOW_LET, None)?;
- if let ExprKind::Let(..) = cond.kind {
+ CondChecker { parser: self, forbid_let_reason: None }.visit_expr(&mut cond);
+
+ if let ExprKind::Let(_, _, _, None) = cond.kind {
// Remove the last feature gating of a `let` expression since it's stable.
self.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
}
@@ -2448,18 +2457,15 @@ impl<'a> Parser<'a> {
}
/// Parses a `let $pat = $expr` pseudo-expression.
- fn parse_expr_let(&mut self) -> PResult<'a, P<Expr>> {
- // This is a *approximate* heuristic that detects if `let` chains are
- // being parsed in the right position. It's approximate because it
- // doesn't deny all invalid `let` expressions, just completely wrong usages.
- let not_in_chain = !matches!(
- self.prev_token.kind,
- TokenKind::AndAnd | TokenKind::Ident(kw::If, _) | TokenKind::Ident(kw::While, _)
- );
- if !self.restrictions.contains(Restrictions::ALLOW_LET) || not_in_chain {
- self.sess.emit_err(errors::ExpectedExpressionFoundLet { span: self.token.span });
- }
-
+ fn parse_expr_let(&mut self, restrictions: Restrictions) -> PResult<'a, P<Expr>> {
+ let is_recovered = if !restrictions.contains(Restrictions::ALLOW_LET) {
+ Some(self.sess.emit_err(errors::ExpectedExpressionFoundLet {
+ span: self.token.span,
+ reason: ForbiddenLetReason::OtherForbidden,
+ }))
+ } else {
+ None
+ };
self.bump(); // Eat `let` token
let lo = self.prev_token.span;
let pat = self.parse_pat_allow_top_alt(
@@ -2477,12 +2483,9 @@ impl<'a> Parser<'a> {
} else {
self.expect(&token::Eq)?;
}
- let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
- this.parse_expr_assoc_with(1 + prec_let_scrutinee_needs_par(), None.into())
- })?;
+ let expr = self.parse_expr_assoc_with(1 + prec_let_scrutinee_needs_par(), None.into())?;
let span = lo.to(expr.span);
- self.sess.gated_spans.gate(sym::let_chains, span);
- Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span)))
+ Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span, is_recovered)))
}
/// Parses an `else { ... }` expression (`else` token already eaten).
@@ -2831,7 +2834,10 @@ impl<'a> Parser<'a> {
)?;
let guard = if this.eat_keyword(kw::If) {
let if_span = this.prev_token.span;
- let cond = this.parse_expr_res(Restrictions::ALLOW_LET, None)?;
+ let mut cond = this.parse_expr_res(Restrictions::ALLOW_LET, None)?;
+
+ CondChecker { parser: this, forbid_let_reason: None }.visit_expr(&mut cond);
+
let (has_let_expr, does_not_have_bin_op) = check_let_expr(&cond);
if has_let_expr {
if does_not_have_bin_op {
@@ -3416,3 +3422,130 @@ impl<'a> Parser<'a> {
})
}
}
+
+/// Used to forbid `let` expressions in certain syntactic locations.
+#[derive(Clone, Copy, Subdiagnostic)]
+pub(crate) enum ForbiddenLetReason {
+ /// `let` is not valid and the source environment is not important
+ OtherForbidden,
+ /// A let chain with the `||` operator
+ #[note(parse_not_supported_or)]
+ NotSupportedOr(#[primary_span] Span),
+ /// A let chain with invalid parentheses
+ ///
+ /// For example, `let 1 = 1 && (expr && expr)` is allowed
+ /// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
+ #[note(parse_not_supported_parentheses)]
+ NotSupportedParentheses(#[primary_span] Span),
+}
+
+/// Visitor to check for invalid/unstable use of `ExprKind::Let` that can't
+/// easily be caught in parsing. For example:
+///
+/// ```rust,ignore (example)
+/// // Only know that the let isn't allowed once the `||` token is reached
+/// if let Some(x) = y || true {}
+/// // Only know that the let isn't allowed once the second `=` token is reached.
+/// if let Some(x) = y && z = 1 {}
+/// ```
+struct CondChecker<'a> {
+ parser: &'a Parser<'a>,
+ forbid_let_reason: Option<ForbiddenLetReason>,
+}
+
+impl MutVisitor for CondChecker<'_> {
+ fn visit_expr(&mut self, e: &mut P<Expr>) {
+ use ForbiddenLetReason::*;
+
+ let span = e.span;
+ match e.kind {
+ ExprKind::Let(_, _, _, ref mut is_recovered @ None) => {
+ if let Some(reason) = self.forbid_let_reason {
+ *is_recovered = Some(
+ self.parser
+ .sess
+ .emit_err(errors::ExpectedExpressionFoundLet { span, reason }),
+ );
+ } else {
+ self.parser.sess.gated_spans.gate(sym::let_chains, span);
+ }
+ }
+ ExprKind::Binary(Spanned { node: BinOpKind::And, .. }, _, _) => {
+ noop_visit_expr(e, self);
+ }
+ ExprKind::Binary(Spanned { node: BinOpKind::Or, span: or_span }, _, _)
+ if let None | Some(NotSupportedOr(_)) = self.forbid_let_reason =>
+ {
+ let forbid_let_reason = self.forbid_let_reason;
+ self.forbid_let_reason = Some(NotSupportedOr(or_span));
+ noop_visit_expr(e, self);
+ self.forbid_let_reason = forbid_let_reason;
+ }
+ ExprKind::Paren(ref inner)
+ if let None | Some(NotSupportedParentheses(_)) = self.forbid_let_reason =>
+ {
+ let forbid_let_reason = self.forbid_let_reason;
+ self.forbid_let_reason = Some(NotSupportedParentheses(inner.span));
+ noop_visit_expr(e, self);
+ self.forbid_let_reason = forbid_let_reason;
+ }
+ ExprKind::Unary(_, _)
+ | ExprKind::Await(_, _)
+ | ExprKind::Assign(_, _, _)
+ | ExprKind::AssignOp(_, _, _)
+ | ExprKind::Range(_, _, _)
+ | ExprKind::Try(_)
+ | ExprKind::AddrOf(_, _, _)
+ | ExprKind::Binary(_, _, _)
+ | ExprKind::Field(_, _)
+ | ExprKind::Index(_, _, _)
+ | ExprKind::Call(_, _)
+ | ExprKind::MethodCall(_)
+ | ExprKind::Tup(_)
+ | ExprKind::Paren(_) => {
+ let forbid_let_reason = self.forbid_let_reason;
+ self.forbid_let_reason = Some(OtherForbidden);
+ noop_visit_expr(e, self);
+ self.forbid_let_reason = forbid_let_reason;
+ }
+ ExprKind::Cast(ref mut op, _)
+ | ExprKind::Type(ref mut op, _) => {
+ let forbid_let_reason = self.forbid_let_reason;
+ self.forbid_let_reason = Some(OtherForbidden);
+ self.visit_expr(op);
+ self.forbid_let_reason = forbid_let_reason;
+ }
+ ExprKind::Let(_, _, _, Some(_))
+ | ExprKind::Array(_)
+ | ExprKind::ConstBlock(_)
+ | ExprKind::Lit(_)
+ | ExprKind::If(_, _, _)
+ | ExprKind::While(_, _, _)
+ | ExprKind::ForLoop(_, _, _, _)
+ | ExprKind::Loop(_, _, _)
+ | ExprKind::Match(_, _)
+ | ExprKind::Closure(_)
+ | ExprKind::Block(_, _)
+ | ExprKind::Async(_, _)
+ | ExprKind::TryBlock(_)
+ | ExprKind::Underscore
+ | ExprKind::Path(_, _)
+ | ExprKind::Break(_, _)
+ | ExprKind::Continue(_)
+ | ExprKind::Ret(_)
+ | ExprKind::InlineAsm(_)
+ | ExprKind::OffsetOf(_, _)
+ | ExprKind::MacCall(_)
+ | ExprKind::Struct(_)
+ | ExprKind::Repeat(_, _)
+ | ExprKind::Yield(_)
+ | ExprKind::Yeet(_)
+ | ExprKind::Become(_)
+ | ExprKind::IncludedBytes(_)
+ | ExprKind::FormatArgs(_)
+ | ExprKind::Err => {
+ // These would forbid any let expressions they contain already.
+ }
+ }
+ }
+}
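
The `CondChecker` visitor added above walks an already-parsed condition and records, for each `let` it finds, why that position forbids a `let` expression (under `||`, inside parentheses, or some other non-chain context), saving and restoring the current reason as it descends. A stripped-down sketch of that save/restore pattern over a toy expression tree; the real visitor distinguishes more cases (for example, parentheses nested under `||` report a generic reason), and every type and name below is invented for illustration:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Forbidden {
        Or,
        Parens,
    }

    enum Expr {
        Let,                       // `let pat = expr`
        And(Box<Expr>, Box<Expr>), // `a && b`: the let-chain operator
        Or(Box<Expr>, Box<Expr>),  // `a || b`: forbids `let` in both arms
        Paren(Box<Expr>),          // `(a)`: forbids `let` chains inside
    }

    /// Collect a reason for every `let` that appears in a forbidden position.
    fn check(e: &Expr, forbid: Option<Forbidden>, errors: &mut Vec<Forbidden>) {
        match e {
            Expr::Let => {
                if let Some(reason) = forbid {
                    errors.push(reason);
                }
            }
            // `&&` keeps whatever context we are already in.
            Expr::And(a, b) => {
                check(a, forbid, errors);
                check(b, forbid, errors);
            }
            Expr::Or(a, b) => {
                check(a, Some(Forbidden::Or), errors);
                check(b, Some(Forbidden::Or), errors);
            }
            Expr::Paren(inner) => check(inner, Some(Forbidden::Parens), errors),
        }
    }

    fn main() {
        // Models `let .. && (let ..) || let ..`.
        let e = Expr::Or(
            Box::new(Expr::And(Box::new(Expr::Let), Box::new(Expr::Paren(Box::new(Expr::Let))))),
            Box::new(Expr::Let),
        );
        let mut errors = Vec::new();
        check(&e, None, &mut errors);
        // The outer `let`s are flagged because of `||`, the inner one because of parentheses.
        assert_eq!(errors, vec![Forbidden::Or, Forbidden::Parens, Forbidden::Or]);
    }
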
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
index 24c65d061..aad4edaba 100644
--- a/compiler/rustc_parse/src/parser/item.rs
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -73,12 +73,16 @@ impl<'a> Parser<'a> {
if !self.maybe_consume_incorrect_semicolon(&items) {
let msg = format!("expected item, found {token_str}");
let mut err = self.struct_span_err(self.token.span, msg);
- let label = if self.is_kw_followed_by_ident(kw::Let) {
- "consider using `const` or `static` instead of `let` for global variables"
+ let span = self.token.span;
+ if self.is_kw_followed_by_ident(kw::Let) {
+ err.span_label(
+ span,
+ "consider using `const` or `static` instead of `let` for global variables",
+ );
} else {
- "expected item"
+ err.span_label(span, "expected item")
+ .note("for a full list of items that can appear in modules, see <https://doc.rust-lang.org/reference/items.html>");
};
- err.span_label(self.token.span, label);
return Err(err);
}
}
@@ -1594,7 +1598,7 @@ impl<'a> Parser<'a> {
Ok((class_name, ItemKind::Union(vdata, generics)))
}
- fn parse_record_struct_body(
+ pub(crate) fn parse_record_struct_body(
&mut self,
adt_ty: &str,
ident_span: Span,
@@ -1851,25 +1855,15 @@ impl<'a> Parser<'a> {
attrs: AttrVec,
) -> PResult<'a, FieldDef> {
let name = self.parse_field_ident(adt_ty, lo)?;
- // Parse the macro invocation and recover
if self.token.kind == token::Not {
if let Err(mut err) = self.unexpected::<FieldDef>() {
- err.subdiagnostic(MacroExpandsToAdtField { adt_ty }).emit();
- self.bump();
- self.parse_delim_args()?;
- return Ok(FieldDef {
- span: DUMMY_SP,
- ident: None,
- vis,
- id: DUMMY_NODE_ID,
- ty: self.mk_ty(DUMMY_SP, TyKind::Err),
- attrs,
- is_placeholder: false,
- });
+ // Encounter the macro invocation
+ err.subdiagnostic(MacroExpandsToAdtField { adt_ty });
+ return Err(err);
}
}
self.expect_field_ty_separator()?;
- let ty = self.parse_ty()?;
+ let ty = self.parse_ty_for_field_def()?;
if self.token.kind == token::Colon && self.look_ahead(1, |tok| tok.kind != token::Colon) {
self.sess.emit_err(errors::SingleColonStructType { span: self.token.span });
}
@@ -1894,7 +1888,9 @@ impl<'a> Parser<'a> {
/// for better diagnostics and suggestions.
fn parse_field_ident(&mut self, adt_ty: &str, lo: Span) -> PResult<'a, Ident> {
let (ident, is_raw) = self.ident_or_err(true)?;
- if !is_raw && ident.is_reserved() {
+ if ident.name == kw::Underscore {
+ self.sess.gated_spans.gate(sym::unnamed_fields, lo);
+ } else if !is_raw && ident.is_reserved() {
let snapshot = self.create_snapshot_for_diagnostic();
let err = if self.check_fn_front_matter(false, Case::Sensitive) {
let inherited_vis = Visibility {
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
index 77c59bb38..e84d8f5b3 100644
--- a/compiler/rustc_parse/src/parser/mod.rs
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -13,6 +13,7 @@ mod ty;
use crate::lexer::UnmatchedDelim;
pub use attr_wrapper::AttrWrapper;
pub use diagnostics::AttemptLocalParseRecovery;
+pub(crate) use expr::ForbiddenLetReason;
pub(crate) use item::FnParseMode;
pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
pub use path::PathStyle;
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
index 2d888efb1..a25b0f1f8 100644
--- a/compiler/rustc_parse/src/parser/ty.rs
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -136,6 +136,17 @@ impl<'a> Parser<'a> {
)
}
+ /// Parse a type suitable for a field definition.
+ /// The difference from `parse_ty` is that this version
+ /// allows anonymous structs and unions.
+ pub fn parse_ty_for_field_def(&mut self) -> PResult<'a, P<Ty>> {
+ if self.can_begin_anon_struct_or_union() {
+ self.parse_anon_struct_or_union()
+ } else {
+ self.parse_ty()
+ }
+ }
+
/// Parse a type suitable for a function or function pointer parameter.
/// The difference from `parse_ty` is that this version allows `...`
/// (`CVarArgs`) at the top level of the type.
@@ -336,6 +347,36 @@ impl<'a> Parser<'a> {
if allow_qpath_recovery { self.maybe_recover_from_bad_qpath(ty) } else { Ok(ty) }
}
+ /// Parse an anonymous struct or union (only for field definitions):
+ /// ```ignore (feature-not-ready)
+ /// #[repr(C)]
+ /// struct Foo {
+ /// _: struct { // anonymous struct
+ /// x: u32,
+ /// y: f64,
+ /// }
+ /// _: union { // anonymous union
+ /// z: u32,
+ /// w: f64,
+ /// }
+ /// }
+ /// ```
+ fn parse_anon_struct_or_union(&mut self) -> PResult<'a, P<Ty>> {
+ assert!(self.token.is_keyword(kw::Union) || self.token.is_keyword(kw::Struct));
+ let is_union = self.token.is_keyword(kw::Union);
+
+ let lo = self.token.span;
+ self.bump();
+
+ let (fields, _recovered) =
+ self.parse_record_struct_body(if is_union { "union" } else { "struct" }, lo, false)?;
+ let span = lo.to(self.prev_token.span);
+ self.sess.gated_spans.gate(sym::unnamed_fields, span);
+ // These can be rejected during AST validation in `deny_anon_struct_or_union`.
+ let kind = if is_union { TyKind::AnonUnion(fields) } else { TyKind::AnonStruct(fields) };
+ Ok(self.mk_ty(span, kind))
+ }
+
/// Parses either:
/// - `(TYPE)`, a parenthesized type.
/// - `(TYPE,)`, a tuple with a single field of type TYPE.
@@ -696,6 +737,11 @@ impl<'a> Parser<'a> {
Ok(bounds)
}
+ pub(super) fn can_begin_anon_struct_or_union(&mut self) -> bool {
+ (self.token.is_keyword(kw::Struct) || self.token.is_keyword(kw::Union))
+ && self.look_ahead(1, |t| t == &token::OpenDelim(Delimiter::Brace))
+ }
+
/// Can the current token begin a bound?
fn can_begin_bound(&mut self) -> bool {
// This needs to be synchronized with `TokenKind::can_begin_bound`.
@@ -845,18 +891,32 @@ impl<'a> Parser<'a> {
// that we do not use the try operator when parsing the type because
// if it fails then we get a parser error which we don't want (we're trying
// to recover from errors, not make more).
- let path = if self.may_recover()
- && matches!(ty.kind, TyKind::Ptr(..) | TyKind::Ref(..))
- && let TyKind::Path(_, path) = &ty.peel_refs().kind {
- // Just get the indirection part of the type.
- let span = ty.span.until(path.span);
-
- err.span_suggestion_verbose(
- span,
- "consider removing the indirection",
- "",
- Applicability::MaybeIncorrect,
- );
+ let path = if self.may_recover() {
+ let (span, message, sugg, path, applicability) = match &ty.kind {
+ TyKind::Ptr(..) | TyKind::Ref(..) if let TyKind::Path(_, path) = &ty.peel_refs().kind => {
+ (
+ ty.span.until(path.span),
+ "consider removing the indirection",
+ "",
+ path,
+ Applicability::MaybeIncorrect
+ )
+ }
+ TyKind::ImplTrait(_, bounds)
+ if let [GenericBound::Trait(tr, ..), ..] = bounds.as_slice() =>
+ {
+ (
+ ty.span.until(tr.span),
+ "use the trait bounds directly",
+ "",
+ &tr.trait_ref.path,
+ Applicability::MachineApplicable
+ )
+ }
+ _ => return Err(err)
+ };
+
+ err.span_suggestion_verbose(span, message, sugg, applicability);
path.clone()
} else {
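The new `parse_ty_for_field_def` / `parse_anon_struct_or_union` pair only accepts `struct { .. }` / `union { .. }` in field position and gates the result on `unnamed_fields`. A nightly-only sketch of the surface syntax the parser now accepts, mirroring the doc-comment example above; whether it gets past later validation and lowering depends on the rest of the still-unstable feature:

```rust
#![feature(unnamed_fields)]
#![allow(dead_code)]

#[repr(C)]
struct Packet {
    tag: u8,
    // `_:` followed by an anonymous union: this is the one place where
    // `can_begin_anon_struct_or_union` is consulted.
    _: union {
        word: u32,
        bytes: [u8; 4],
    },
}

fn main() {}
```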
diff --git a/compiler/rustc_parse_format/Cargo.toml b/compiler/rustc_parse_format/Cargo.toml
index 72da398d3..143303532 100644
--- a/compiler/rustc_parse_format/Cargo.toml
+++ b/compiler/rustc_parse_format/Cargo.toml
@@ -5,4 +5,4 @@ edition = "2021"
[dependencies]
rustc_lexer = { path = "../rustc_lexer" }
-rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_index = { path = "../rustc_index", default-features = false }
diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs
index 88452ccdf..90ac436a9 100644
--- a/compiler/rustc_parse_format/src/lib.rs
+++ b/compiler/rustc_parse_format/src/lib.rs
@@ -210,7 +210,17 @@ pub struct ParseError {
pub label: string::String,
pub span: InnerSpan,
pub secondary_label: Option<(string::String, InnerSpan)>,
- pub should_be_replaced_with_positional_argument: bool,
+ pub suggestion: Suggestion,
+}
+
+pub enum Suggestion {
+ None,
+ /// Replace inline argument with positional argument:
+ /// `format!("{foo.bar}")` -> `format!("{}", foo.bar)`
+ UsePositional,
+ /// Remove `r#` from identifier:
+ /// `format!("{r#foo}")` -> `format!("{foo}")`
+ RemoveRawIdent(InnerSpan),
}
/// The parser structure for interpreting the input format string. This is
@@ -365,7 +375,7 @@ impl<'a> Parser<'a> {
label: label.into(),
span,
secondary_label: None,
- should_be_replaced_with_positional_argument: false,
+ suggestion: Suggestion::None,
});
}
@@ -389,7 +399,7 @@ impl<'a> Parser<'a> {
label: label.into(),
span,
secondary_label: None,
- should_be_replaced_with_positional_argument: false,
+ suggestion: Suggestion::None,
});
}
@@ -493,7 +503,7 @@ impl<'a> Parser<'a> {
label,
span: pos.to(pos),
secondary_label,
- should_be_replaced_with_positional_argument: false,
+ suggestion: Suggestion::None,
});
None
@@ -573,7 +583,37 @@ impl<'a> Parser<'a> {
Some(ArgumentIs(i))
} else {
match self.cur.peek() {
- Some(&(_, c)) if rustc_lexer::is_id_start(c) => Some(ArgumentNamed(self.word())),
+ Some(&(lo, c)) if rustc_lexer::is_id_start(c) => {
+ let word = self.word();
+
+ // Recover from `r#ident` in format strings.
+ // FIXME: use a let chain
+ if word == "r" {
+ if let Some((pos, '#')) = self.cur.peek() {
+ if self.input[pos + 1..]
+ .chars()
+ .next()
+ .is_some_and(rustc_lexer::is_id_start)
+ {
+ self.cur.next();
+ let word = self.word();
+ let prefix_span = self.span(lo, lo + 2);
+ let full_span = self.span(lo, lo + 2 + word.len());
+ self.errors.insert(0, ParseError {
+ description: "raw identifiers are not supported".to_owned(),
+ note: Some("identifiers in format strings can be keywords and don't need to be prefixed with `r#`".to_string()),
+ label: "raw identifier used here".to_owned(),
+ span: full_span,
+ secondary_label: None,
+ suggestion: Suggestion::RemoveRawIdent(prefix_span),
+ });
+ return Some(ArgumentNamed(word));
+ }
+ }
+ }
+
+ Some(ArgumentNamed(word))
+ }
// This is an `ArgumentNext`.
// Record the fact and do the resolution after parsing the
@@ -841,7 +881,7 @@ impl<'a> Parser<'a> {
label: "expected `?` to occur after `:`".to_owned(),
span: pos.to(pos),
secondary_label: None,
- should_be_replaced_with_positional_argument: false,
+ suggestion: Suggestion::None,
},
);
}
@@ -867,7 +907,7 @@ impl<'a> Parser<'a> {
label: "not supported".to_string(),
span: InnerSpan::new(arg.position_span.start, field.position_span.end),
secondary_label: None,
- should_be_replaced_with_positional_argument: true,
+ suggestion: Suggestion::UsePositional,
},
);
}
@@ -1011,7 +1051,7 @@ fn unescape_string(string: &str) -> Option<string::String> {
// Assert a reasonable size for `Piece`
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Piece<'_>, 16);
+rustc_index::static_assert_size!(Piece<'_>, 16);
#[cfg(test)]
mod tests;
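The `Suggestion` enum above replaces the single `should_be_replaced_with_positional_argument` flag, and the new `r#ident` branch produces `Suggestion::RemoveRawIdent`. A short illustrative sketch (not from the diff) of the two inputs that trigger each suggestion, and the accepted spellings:

```rust
fn main() {
    let r#type = "raw";
    // Writing `println!("{r#type}")` now reports "raw identifiers are not
    // supported" with `Suggestion::RemoveRawIdent`; keywords work without
    // the `r#` prefix inside format strings:
    println!("{type}");

    struct Foo { bar: i32 }
    let foo = Foo { bar: 1 };
    // Writing `println!("{foo.bar}")` still reports `Suggestion::UsePositional`;
    // the accepted form is a positional argument:
    println!("{}", foo.bar);
}
```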
diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl
index 6eacbebe7..214c6d709 100644
--- a/compiler/rustc_passes/messages.ftl
+++ b/compiler/rustc_passes/messages.ftl
@@ -4,11 +4,14 @@
-passes_see_issue =
see issue #{$issue} <https://github.com/rust-lang/rust/issues/{$issue}> for more information
-passes_abi =
- abi: {$abi}
-
-passes_align =
- align: {$align}
+passes_abi_invalid_attribute =
+ `#[rustc_abi]` can only be applied to function items, type aliases, and associated functions
+passes_abi_ne =
+ ABIs are not compatible
+ left ABI = {$left}
+ right ABI = {$right}
+passes_abi_of =
+ fn_abi_of({$fn_name}) = {$fn_abi}
passes_allow_incoherent_impl =
`rustc_allow_incoherent_impl` attribute should be applied to impl items.
@@ -101,15 +104,24 @@ passes_collapse_debuginfo =
passes_confusables = attribute should be applied to an inherent method
.label = not an inherent method
-passes_const_impl_const_trait =
- const `impl`s must be for traits marked with `#[const_trait]`
- .note = this trait must be annotated with `#[const_trait]`
-
passes_continue_labeled_block =
`continue` pointing to a labeled block
.label = labeled blocks cannot be `continue`'d
.block_label = labeled block the `continue` points to
+passes_coverage_fn_defn =
+ `#[coverage]` may only be applied to function definitions
+
+passes_coverage_ignored_function_prototype =
+ `#[coverage]` is ignored on function prototypes
+
+passes_coverage_not_coverable =
+ `#[coverage]` must be applied to coverable code
+ .label = not coverable code
+
+passes_coverage_propagate =
+ `#[coverage]` does not propagate into items and must be applied to the contained functions directly
+
passes_dead_codes =
{ $multiple ->
*[true] multiple {$descr}s are
@@ -141,6 +153,9 @@ passes_deprecated_annotation_has_no_effect =
passes_deprecated_attribute =
deprecated attribute must be paired with either stable or unstable attribute
+passes_diagnostic_diagnostic_on_unimplemented_only_for_traits =
+ `#[diagnostic::on_unimplemented]` can only be applied to trait definitions
+
passes_diagnostic_item_first_defined =
the diagnostic item is first defined here
@@ -315,9 +330,6 @@ passes_has_incoherent_inherent_impl =
`rustc_has_incoherent_inherent_impls` attribute should be applied to types or traits.
.label = only adts, extern types and traits are supported
-passes_homogeneous_aggregate =
- homogeneous_aggregate: {$homogeneous_aggregate}
-
passes_ignored_attr =
`#[{$sym}]` is ignored on struct fields and match arms
.warn = {-passes_previously_accepted}
@@ -395,15 +407,28 @@ passes_invalid_stability =
.label = invalid stability version
.item = the stability attribute annotates this item
+passes_lang_item_fn_with_target_feature =
+ `{$name}` language item function is not allowed to have `#[target_feature]`
+ .label = `{$name}` language item function is not allowed to have `#[target_feature]`
+
passes_lang_item_on_incorrect_target =
`{$name}` language item must be applied to a {$expected_target}
.label = attribute should be applied to a {$expected_target}, not a {$actual_target}
passes_layout =
layout error: {$layout_error}
-
+passes_layout_abi =
+ abi: {$abi}
+passes_layout_align =
+ align: {$align}
+passes_layout_homogeneous_aggregate =
+ homogeneous_aggregate: {$homogeneous_aggregate}
+passes_layout_invalid_attribute =
+ `#[rustc_layout]` can only be applied to `struct`/`enum`/`union` declarations and type aliases
passes_layout_of =
layout_of({$normalized_ty}) = {$ty_layout}
+passes_layout_size =
+ size: {$size}
passes_link =
attribute should be applied to an `extern` block with non-Rust ABI
@@ -494,19 +519,6 @@ passes_naked_functions_operands =
passes_naked_tracked_caller =
cannot use `#[track_caller]` with `#[naked]`
-passes_no_coverage_fn_defn =
- `#[no_coverage]` may only be applied to function definitions
-
-passes_no_coverage_ignored_function_prototype =
- `#[no_coverage]` is ignored on function prototypes
-
-passes_no_coverage_not_coverable =
- `#[no_coverage]` must be applied to coverable code
- .label = not coverable code
-
-passes_no_coverage_propagate =
- `#[no_coverage]` does not propagate into items and must be applied to the contained functions directly
-
passes_no_link =
attribute should be applied to an `extern crate` item
.label = not an `extern crate` item
@@ -636,6 +648,10 @@ passes_rustc_lint_opt_ty =
`#[rustc_lint_opt_ty]` should be applied to a struct
.label = not a struct
+passes_rustc_safe_intrinsic =
+ attribute should be applied to intrinsic functions
+ .label = not an intrinsic function
+
passes_rustc_std_internal_symbol =
attribute should be applied to functions or statics
.label = not a function or static
@@ -659,9 +675,6 @@ passes_should_be_applied_to_trait =
attribute should be applied to a trait
.label = not a trait
-passes_size =
- size: {$size}
-
passes_skipping_const_checks = skipping const checks
passes_stability_promotable =
@@ -721,7 +734,7 @@ passes_unrecognized_field =
passes_unrecognized_repr_hint =
unrecognized representation hint
- .help = valid reprs are `C`, `align`, `packed`, `transparent`, `simd`, `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `i128`, `u128`, `isize`, `usize`
+ .help = valid reprs are `Rust` (default), `C`, `align`, `packed`, `transparent`, `simd`, `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `i128`, `u128`, `isize`, `usize`
passes_unused =
unused attribute
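The message-catalog changes above track the rename of `#[no_coverage]` to `#[coverage]`. A sketch of the renamed attribute as user code would spell it, assuming the `#[coverage(off)]` argument form and the `coverage_attribute` feature gate (neither appears in this diff):

```rust
#![feature(coverage_attribute)]

// Excluded from instrumentation-based code coverage; previously `#[no_coverage]`.
#[coverage(off)]
fn not_instrumented() {}

fn main() {
    not_instrumented();
}
```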
diff --git a/compiler/rustc_passes/src/abi_test.rs b/compiler/rustc_passes/src/abi_test.rs
new file mode 100644
index 000000000..153c39977
--- /dev/null
+++ b/compiler/rustc_passes/src/abi_test.rs
@@ -0,0 +1,197 @@
+use rustc_ast::Attribute;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::layout::{FnAbiError, LayoutError};
+use rustc_middle::ty::{self, GenericArgs, Instance, Ty, TyCtxt};
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::sym;
+use rustc_target::abi::call::FnAbi;
+
+use super::layout_test::ensure_wf;
+use crate::errors::{AbiInvalidAttribute, AbiNe, AbiOf, UnrecognizedField};
+
+pub fn test_abi(tcx: TyCtxt<'_>) {
+ if !tcx.features().rustc_attrs {
+ // if the `rustc_attrs` feature is not enabled, don't bother testing ABI
+ return;
+ }
+ for id in tcx.hir_crate_items(()).definitions() {
+ for attr in tcx.get_attrs(id, sym::rustc_abi) {
+ match tcx.def_kind(id) {
+ DefKind::Fn | DefKind::AssocFn => {
+ dump_abi_of_fn_item(tcx, id, attr);
+ }
+ DefKind::TyAlias => {
+ dump_abi_of_fn_type(tcx, id, attr);
+ }
+ _ => {
+ tcx.sess.emit_err(AbiInvalidAttribute { span: tcx.def_span(id) });
+ }
+ }
+ }
+ }
+}
+
+fn unwrap_fn_abi<'tcx>(
+ abi: Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ item_def_id: LocalDefId,
+) -> &'tcx FnAbi<'tcx, Ty<'tcx>> {
+ match abi {
+ Ok(abi) => abi,
+ Err(FnAbiError::Layout(layout_error)) => {
+ tcx.sess.emit_fatal(Spanned {
+ node: layout_error.into_diagnostic(),
+ span: tcx.def_span(item_def_id),
+ });
+ }
+ Err(FnAbiError::AdjustForForeignAbi(e)) => {
+ // Sadly there seems to be no `into_diagnostic` for this case... and I am not sure if
+ // this can even be reached. Anyway this is a perma-unstable debug attribute, an ICE
+ // isn't the worst thing. Also this matches what codegen does.
+ span_bug!(
+ tcx.def_span(item_def_id),
+ "error computing fn_abi_of_instance, cannot adjust for foreign ABI: {e:?}",
+ )
+ }
+ }
+}
+
+fn dump_abi_of_fn_item(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
+ let param_env = tcx.param_env(item_def_id);
+ let args = GenericArgs::identity_for_item(tcx, item_def_id);
+ let instance = match Instance::resolve(tcx, param_env, item_def_id.into(), args) {
+ Ok(Some(instance)) => instance,
+ Ok(None) => {
+ // Not sure what to do here, but `LayoutError::Unknown` seems reasonable?
+ let ty = tcx.type_of(item_def_id).instantiate_identity();
+ tcx.sess.emit_fatal(Spanned {
+ node: LayoutError::Unknown(ty).into_diagnostic(),
+
+ span: tcx.def_span(item_def_id),
+ });
+ }
+ Err(_guaranteed) => return,
+ };
+ let abi = unwrap_fn_abi(
+ tcx.fn_abi_of_instance(param_env.and((instance, /* extra_args */ ty::List::empty()))),
+ tcx,
+ item_def_id,
+ );
+
+ // Check out the `#[rustc_abi(..)]` attribute to tell what to dump.
+ // The `..` are the names of fields to dump.
+ let meta_items = attr.meta_item_list().unwrap_or_default();
+ for meta_item in meta_items {
+ match meta_item.name_or_empty() {
+ sym::debug => {
+ let fn_name = tcx.item_name(item_def_id.into());
+ tcx.sess.emit_err(AbiOf {
+ span: tcx.def_span(item_def_id),
+ fn_name,
+ // FIXME: using the `Debug` impl here isn't ideal.
+ fn_abi: format!("{:#?}", abi),
+ });
+ }
+
+ name => {
+ tcx.sess.emit_err(UnrecognizedField { span: meta_item.span(), name });
+ }
+ }
+ }
+}
+
+fn test_abi_eq<'tcx>(abi1: &'tcx FnAbi<'tcx, Ty<'tcx>>, abi2: &'tcx FnAbi<'tcx, Ty<'tcx>>) -> bool {
+ if abi1.conv != abi2.conv
+ || abi1.args.len() != abi2.args.len()
+ || abi1.c_variadic != abi2.c_variadic
+ || abi1.fixed_count != abi2.fixed_count
+ || abi1.can_unwind != abi2.can_unwind
+ {
+ return false;
+ }
+
+ abi1.ret.eq_abi(&abi2.ret)
+ && abi1.args.iter().zip(abi2.args.iter()).all(|(arg1, arg2)| arg1.eq_abi(arg2))
+}
+
+fn dump_abi_of_fn_type(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
+ let param_env = tcx.param_env(item_def_id);
+ let ty = tcx.type_of(item_def_id).instantiate_identity();
+ let span = tcx.def_span(item_def_id);
+ if !ensure_wf(tcx, param_env, ty, item_def_id, span) {
+ return;
+ }
+ let meta_items = attr.meta_item_list().unwrap_or_default();
+ for meta_item in meta_items {
+ match meta_item.name_or_empty() {
+ sym::debug => {
+ let ty::FnPtr(sig) = ty.kind() else {
+ span_bug!(
+ meta_item.span(),
+ "`#[rustc_abi(debug)]` on a type alias requires function pointer type"
+ );
+ };
+ let abi = unwrap_fn_abi(
+ tcx.fn_abi_of_fn_ptr(param_env.and((*sig, /* extra_args */ ty::List::empty()))),
+ tcx,
+ item_def_id,
+ );
+
+ let fn_name = tcx.item_name(item_def_id.into());
+ tcx.sess.emit_err(AbiOf { span, fn_name, fn_abi: format!("{:#?}", abi) });
+ }
+ sym::assert_eq => {
+ let ty::Tuple(fields) = ty.kind() else {
+ span_bug!(
+ meta_item.span(),
+ "`#[rustc_abi(assert_eq)]` on a type alias requires pair type"
+ );
+ };
+ let [field1, field2] = ***fields else {
+ span_bug!(
+ meta_item.span(),
+ "`#[rustc_abi(assert_eq)]` on a type alias requires pair type"
+ );
+ };
+ let ty::FnPtr(sig1) = field1.kind() else {
+ span_bug!(
+ meta_item.span(),
+ "`#[rustc_abi(assert_eq)]` on a type alias requires pair of function pointer types"
+ );
+ };
+ let abi1 = unwrap_fn_abi(
+ tcx.fn_abi_of_fn_ptr(
+ param_env.and((*sig1, /* extra_args */ ty::List::empty())),
+ ),
+ tcx,
+ item_def_id,
+ );
+ let ty::FnPtr(sig2) = field2.kind() else {
+ span_bug!(
+ meta_item.span(),
+ "`#[rustc_abi(assert_eq)]` on a type alias requires pair of function pointer types"
+ );
+ };
+ let abi2 = unwrap_fn_abi(
+ tcx.fn_abi_of_fn_ptr(
+ param_env.and((*sig2, /* extra_args */ ty::List::empty())),
+ ),
+ tcx,
+ item_def_id,
+ );
+
+ if !test_abi_eq(abi1, abi2) {
+ tcx.sess.emit_err(AbiNe {
+ span,
+ left: format!("{:#?}", abi1),
+ right: format!("{:#?}", abi2),
+ });
+ }
+ }
+ name => {
+ tcx.sess.emit_err(UnrecognizedField { span: meta_item.span(), name });
+ }
+ }
+ }
+}
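`abi_test.rs` is new: `#[rustc_abi(debug)]` dumps a function's computed `FnAbi`, and `#[rustc_abi(assert_eq)]` on a type alias compares the ABIs of a pair of function pointer types via `test_abi_eq`. A sketch of how a UI test might use it (nightly plus `rustc_attrs`; the "output" is emitted as errors by design, so such a test is expected to fail compilation):

```rust
#![feature(rustc_attrs)]
#![allow(dead_code)]

// Dump the computed ABI of this function as an `AbiOf` diagnostic.
#[rustc_abi(debug)]
fn add(a: u32, b: u32) -> u32 { a + b }

// Assert that two function-pointer types lower to compatible ABIs;
// `AbiNe` is reported if `test_abi_eq` says they differ.
#[rustc_abi(assert_eq)]
type SameAbi = (fn(u32) -> u32, fn(u32) -> u32);

fn main() {
    let _ = add(1, 2);
}
```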
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
index 197b335bd..d92923e78 100644
--- a/compiler/rustc_passes/src/check_attr.rs
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -16,6 +16,7 @@ use rustc_hir::{
self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID, CRATE_OWNER_ID,
};
use rustc_hir::{MethodKind, Target, Unsafety};
+use rustc_macros::LintDiagnostic;
use rustc_middle::hir::nested_filter;
use rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault;
use rustc_middle::query::Providers;
@@ -24,7 +25,7 @@ use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::{
CONFLICTING_REPR_HINTS, INVALID_DOC_ATTRIBUTES, INVALID_MACRO_EXPORT_ARGUMENTS,
- UNUSED_ATTRIBUTES,
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES, UNUSED_ATTRIBUTES,
};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{kw, sym, Symbol};
@@ -36,6 +37,10 @@ use rustc_trait_selection::traits::ObligationCtxt;
use std::cell::Cell;
use std::collections::hash_map::Entry;
+#[derive(LintDiagnostic)]
+#[diag(passes_diagnostic_diagnostic_on_unimplemented_only_for_traits)]
+pub struct DiagnosticOnUnimplementedOnlyForTraits;
+
pub(crate) fn target_from_impl_item<'tcx>(
tcx: TyCtxt<'tcx>,
impl_item: &hir::ImplItem<'_>,
@@ -104,13 +109,16 @@ impl CheckAttrVisitor<'_> {
let mut seen = FxHashMap::default();
let attrs = self.tcx.hir().attrs(hir_id);
for attr in attrs {
+ if attr.path_matches(&[sym::diagnostic, sym::on_unimplemented]) {
+ self.check_diagnostic_on_unimplemented(attr.span, hir_id, target);
+ }
match attr.name_or_empty() {
sym::do_not_recommend => self.check_do_not_recommend(attr.span, target),
sym::inline => self.check_inline(hir_id, attr, span, target),
- sym::no_coverage => self.check_no_coverage(hir_id, attr, span, target),
+ sym::coverage => self.check_coverage(hir_id, attr, span, target),
sym::non_exhaustive => self.check_non_exhaustive(hir_id, attr, span, target),
sym::marker => self.check_marker(hir_id, attr, span, target),
- sym::target_feature => self.check_target_feature(hir_id, attr, span, target),
+ sym::target_feature => self.check_target_feature(hir_id, attr, span, target, attrs),
sym::thread_local => self.check_thread_local(attr, span, target),
sym::track_caller => {
self.check_track_caller(hir_id, attr.span, attrs, span, target)
@@ -139,6 +147,9 @@ impl CheckAttrVisitor<'_> {
self.check_rustc_std_internal_symbol(&attr, span, target)
}
sym::naked => self.check_naked(hir_id, attr, span, target),
+ sym::rustc_never_returns_null_ptr => {
+ self.check_applied_to_fn_or_method(hir_id, attr, span, target)
+ }
sym::rustc_legacy_const_generics => {
self.check_rustc_legacy_const_generics(hir_id, &attr, span, target, item)
}
@@ -184,6 +195,9 @@ impl CheckAttrVisitor<'_> {
| sym::rustc_promotable => self.check_stability_promotable(&attr, span, target),
sym::link_ordinal => self.check_link_ordinal(&attr, span, target),
sym::rustc_confusables => self.check_confusables(&attr, target),
+ sym::rustc_safe_intrinsic => {
+ self.check_rustc_safe_intrinsic(hir_id, attr, span, target)
+ }
_ => true,
};
@@ -284,6 +298,18 @@ impl CheckAttrVisitor<'_> {
}
}
+ /// Checks if `#[diagnostic::on_unimplemented]` is applied to a trait definition
+ fn check_diagnostic_on_unimplemented(&self, attr_span: Span, hir_id: HirId, target: Target) {
+ if !matches!(target, Target::Trait) {
+ self.tcx.emit_spanned_lint(
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
+ hir_id,
+ attr_span,
+ DiagnosticOnUnimplementedOnlyForTraits,
+ );
+ }
+ }
+
/// Checks if an `#[inline]` is applied to a function or a closure. Returns `true` if valid.
fn check_inline(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
match target {
@@ -327,16 +353,10 @@ impl CheckAttrVisitor<'_> {
}
}
- /// Checks if a `#[no_coverage]` is applied directly to a function
- fn check_no_coverage(
- &self,
- hir_id: HirId,
- attr: &Attribute,
- span: Span,
- target: Target,
- ) -> bool {
+ /// Checks if a `#[coverage]` is applied directly to a function
+ fn check_coverage(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
match target {
- // no_coverage on function is fine
+ // #[coverage] on function is fine
Target::Fn
| Target::Closure
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
@@ -347,7 +367,7 @@ impl CheckAttrVisitor<'_> {
UNUSED_ATTRIBUTES,
hir_id,
attr.span,
- errors::IgnoredNoCoverageFnProto,
+ errors::IgnoredCoverageFnProto,
);
true
}
@@ -357,7 +377,7 @@ impl CheckAttrVisitor<'_> {
UNUSED_ATTRIBUTES,
hir_id,
attr.span,
- errors::IgnoredNoCoveragePropagate,
+ errors::IgnoredCoveragePropagate,
);
true
}
@@ -367,13 +387,13 @@ impl CheckAttrVisitor<'_> {
UNUSED_ATTRIBUTES,
hir_id,
attr.span,
- errors::IgnoredNoCoverageFnDefn,
+ errors::IgnoredCoverageFnDefn,
);
true
}
_ => {
- self.tcx.sess.emit_err(errors::IgnoredNoCoverageNotCoverable {
+ self.tcx.sess.emit_err(errors::IgnoredCoverageNotCoverable {
attr_span: attr.span,
defn_span: span,
});
@@ -574,10 +594,36 @@ impl CheckAttrVisitor<'_> {
attr: &Attribute,
span: Span,
target: Target,
+ attrs: &[Attribute],
) -> bool {
match target {
- Target::Fn
- | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+ Target::Fn => {
+ // `#[target_feature]` is not allowed in language items.
+ if let Some((lang_item, _)) = hir::lang_items::extract(attrs)
+ // Calling functions with `#[target_feature]` is
+ // not unsafe on WASM, see #84988
+ && !self.tcx.sess.target.is_like_wasm
+ && !self.tcx.sess.opts.actually_rustdoc
+ {
+ let hir::Node::Item(item) = self.tcx.hir().get(hir_id) else {
+ unreachable!();
+ };
+ let hir::ItemKind::Fn(sig, _, _) = item.kind else {
+ // target is `Fn`
+ unreachable!();
+ };
+
+ self.tcx.sess.emit_err(errors::LangItemWithTargetFeature {
+ attr_span: attr.span,
+ name: lang_item,
+ sig_span: sig.span,
+ });
+ false
+ } else {
+ true
+ }
+ }
+ Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
// FIXME: #[target_feature] was previously erroneously allowed on statements and some
// crates used this, so only emit a warning.
Target::Statement => {
@@ -1721,6 +1767,7 @@ impl CheckAttrVisitor<'_> {
.collect();
let mut int_reprs = 0;
+ let mut is_explicit_rust = false;
let mut is_c = false;
let mut is_simd = false;
let mut is_transparent = false;
@@ -1732,6 +1779,9 @@ impl CheckAttrVisitor<'_> {
}
match hint.name_or_empty() {
+ sym::Rust => {
+ is_explicit_rust = true;
+ }
sym::C => {
is_c = true;
match target {
@@ -1841,12 +1891,16 @@ impl CheckAttrVisitor<'_> {
// Error on repr(transparent, <anything else>).
if is_transparent && hints.len() > 1 {
- let hint_spans: Vec<_> = hint_spans.clone().collect();
+ let hint_spans = hint_spans.clone().collect();
self.tcx.sess.emit_err(errors::TransparentIncompatible {
hint_spans,
target: target.to_string(),
});
}
+ if is_explicit_rust && (int_reprs > 0 || is_c || is_simd) {
+ let hint_spans = hint_spans.clone().collect();
+ self.tcx.sess.emit_err(errors::ReprConflicting { hint_spans });
+ }
// Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8)
if (int_reprs > 1)
|| (is_simd && is_c)
@@ -1863,7 +1917,7 @@ impl CheckAttrVisitor<'_> {
CONFLICTING_REPR_HINTS,
hir_id,
hint_spans.collect::<Vec<Span>>(),
- errors::ReprConflicting,
+ errors::ReprConflictingLint,
);
}
}
@@ -1998,6 +2052,29 @@ impl CheckAttrVisitor<'_> {
}
}
+ fn check_rustc_safe_intrinsic(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ let hir = self.tcx.hir();
+
+ if let Target::ForeignFn = target
+ && let Some(parent) = hir.opt_parent_id(hir_id)
+ && let hir::Node::Item(Item {
+ kind: ItemKind::ForeignMod { abi: Abi::RustIntrinsic | Abi::PlatformIntrinsic, .. },
+ ..
+ }) = hir.get(parent)
+ {
+ return true;
+ }
+
+ self.tcx.sess.emit_err(errors::RustcSafeIntrinsic { attr_span: attr.span, span });
+ false
+ }
+
fn check_rustc_std_internal_symbol(
&self,
attr: &Attribute,
@@ -2289,7 +2366,10 @@ impl CheckAttrVisitor<'_> {
&mut diag,
&cause,
None,
- Some(ValuePairs::Sigs(ExpectedFound { expected: expected_sig, found: sig })),
+ Some(ValuePairs::PolySigs(ExpectedFound {
+ expected: ty::Binder::dummy(expected_sig),
+ found: ty::Binder::dummy(sig),
+ })),
terr,
false,
false,
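Among the `check_attr` changes, `#[diagnostic::on_unimplemented]` is now linted through `UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES` unless it sits on a trait definition. A sketch of the accepted placement; the `diagnostic_namespace` feature gate and the `message = "..."` key are assumptions, not taken from this diff:

```rust
#![feature(diagnostic_namespace)]
#![allow(dead_code)]

// Allowed: the attribute sits on a trait definition.
#[diagnostic::on_unimplemented(message = "`{Self}` cannot be frobbed")]
trait Frob {}

// Putting the same attribute on a struct or a function would now trigger
// the new lint instead of being silently accepted.
struct Widget;
impl Frob for Widget {}

fn main() {}
```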
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
index 8437e9a40..6d176af80 100644
--- a/compiler/rustc_passes/src/check_const.rs
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -193,12 +193,12 @@ impl<'tcx> Visitor<'tcx> for CheckConstVisitor<'tcx> {
}
fn visit_anon_const(&mut self, anon: &'tcx hir::AnonConst) {
- let kind = Some(hir::ConstContext::Const);
+ let kind = Some(hir::ConstContext::Const { inline: false });
self.recurse_into(kind, None, |this| intravisit::walk_anon_const(this, anon));
}
fn visit_inline_const(&mut self, block: &'tcx hir::ConstBlock) {
- let kind = Some(hir::ConstContext::Const);
+ let kind = Some(hir::ConstContext::Const { inline: true });
self.recurse_into(kind, None, |this| intravisit::walk_inline_const(this, block));
}
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
index d1c3bcf38..493daf314 100644
--- a/compiler/rustc_passes/src/dead.rs
+++ b/compiler/rustc_passes/src/dead.rs
@@ -96,7 +96,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> {
fn handle_res(&mut self, res: Res) {
match res {
- Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias { .. }, def_id) => {
+ Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias, def_id) => {
self.check_def_id(def_id);
}
_ if self.in_pat => {}
@@ -923,7 +923,7 @@ impl<'tcx> DeadVisitor<'tcx> {
| DefKind::Fn
| DefKind::Static(_)
| DefKind::Const
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::Enum
| DefKind::Union
| DefKind::ForeignTy => self.warn_dead_code(def_id, "used"),
diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs
index 683717344..f4a6bf017 100644
--- a/compiler/rustc_passes/src/errors.rs
+++ b/compiler/rustc_passes/src/errors.rs
@@ -64,20 +64,20 @@ pub struct InlineNotFnOrClosure {
}
#[derive(LintDiagnostic)]
-#[diag(passes_no_coverage_ignored_function_prototype)]
-pub struct IgnoredNoCoverageFnProto;
+#[diag(passes_coverage_ignored_function_prototype)]
+pub struct IgnoredCoverageFnProto;
#[derive(LintDiagnostic)]
-#[diag(passes_no_coverage_propagate)]
-pub struct IgnoredNoCoveragePropagate;
+#[diag(passes_coverage_propagate)]
+pub struct IgnoredCoveragePropagate;
#[derive(LintDiagnostic)]
-#[diag(passes_no_coverage_fn_defn)]
-pub struct IgnoredNoCoverageFnDefn;
+#[diag(passes_coverage_fn_defn)]
+pub struct IgnoredCoverageFnDefn;
#[derive(Diagnostic)]
-#[diag(passes_no_coverage_not_coverable, code = "E0788")]
-pub struct IgnoredNoCoverageNotCoverable {
+#[diag(passes_coverage_not_coverable, code = "E0788")]
+pub struct IgnoredCoverageNotCoverable {
#[primary_span]
pub attr_span: Span,
#[label]
@@ -558,9 +558,16 @@ pub struct ReprIdent {
pub span: Span,
}
+#[derive(Diagnostic)]
+#[diag(passes_repr_conflicting, code = "E0566")]
+pub struct ReprConflicting {
+ #[primary_span]
+ pub hint_spans: Vec<Span>,
+}
+
#[derive(LintDiagnostic)]
#[diag(passes_repr_conflicting, code = "E0566")]
-pub struct ReprConflicting;
+pub struct ReprConflictingLint;
#[derive(Diagnostic)]
#[diag(passes_used_static)]
@@ -621,6 +628,15 @@ pub struct RustcAllowConstFnUnstable {
}
#[derive(Diagnostic)]
+#[diag(passes_rustc_safe_intrinsic)]
+pub struct RustcSafeIntrinsic {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(passes_rustc_std_internal_symbol)]
pub struct RustcStdInternalSymbol {
#[primary_span]
@@ -809,6 +825,16 @@ pub struct MissingLangItem {
}
#[derive(Diagnostic)]
+#[diag(passes_lang_item_fn_with_target_feature)]
+pub struct LangItemWithTargetFeature {
+ #[primary_span]
+ pub attr_span: Span,
+ pub name: Symbol,
+ #[label]
+ pub sig_span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(passes_lang_item_on_incorrect_target, code = "E0718")]
pub struct LangItemOnIncorrectTarget {
#[primary_span]
@@ -873,32 +899,32 @@ pub struct DuplicateDiagnosticItemInCrate {
}
#[derive(Diagnostic)]
-#[diag(passes_abi)]
-pub struct Abi {
+#[diag(passes_layout_abi)]
+pub struct LayoutAbi {
#[primary_span]
pub span: Span,
pub abi: String,
}
#[derive(Diagnostic)]
-#[diag(passes_align)]
-pub struct Align {
+#[diag(passes_layout_align)]
+pub struct LayoutAlign {
#[primary_span]
pub span: Span,
pub align: String,
}
#[derive(Diagnostic)]
-#[diag(passes_size)]
-pub struct Size {
+#[diag(passes_layout_size)]
+pub struct LayoutSize {
#[primary_span]
pub span: Span,
pub size: String,
}
#[derive(Diagnostic)]
-#[diag(passes_homogeneous_aggregate)]
-pub struct HomogeneousAggregate {
+#[diag(passes_layout_homogeneous_aggregate)]
+pub struct LayoutHomogeneousAggregate {
#[primary_span]
pub span: Span,
pub homogeneous_aggregate: String,
@@ -914,6 +940,38 @@ pub struct LayoutOf {
}
#[derive(Diagnostic)]
+#[diag(passes_layout_invalid_attribute)]
+pub struct LayoutInvalidAttribute {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_abi_of)]
+pub struct AbiOf {
+ #[primary_span]
+ pub span: Span,
+ pub fn_name: Symbol,
+ pub fn_abi: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_abi_ne)]
+pub struct AbiNe {
+ #[primary_span]
+ pub span: Span,
+ pub left: String,
+ pub right: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(passes_abi_invalid_attribute)]
+pub struct AbiInvalidAttribute {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(Diagnostic)]
#[diag(passes_unrecognized_field)]
pub struct UnrecognizedField {
#[primary_span]
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
index 5aa8aef6a..24087a4ea 100644
--- a/compiler/rustc_passes/src/hir_stats.rs
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -587,6 +587,8 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
BareFn,
Never,
Tup,
+ AnonStruct,
+ AnonUnion,
Path,
TraitObject,
ImplTrait,
diff --git a/compiler/rustc_passes/src/lang_items.rs b/compiler/rustc_passes/src/lang_items.rs
index 476394f30..7e8372439 100644
--- a/compiler/rustc_passes/src/lang_items.rs
+++ b/compiler/rustc_passes/src/lang_items.rs
@@ -20,7 +20,8 @@ use rustc_hir::lang_items::{extract, GenericRequirement};
use rustc_hir::{LangItem, LanguageItems, Target};
use rustc_middle::ty::TyCtxt;
use rustc_session::cstore::ExternCrate;
-use rustc_span::{symbol::kw::Empty, Span};
+use rustc_span::symbol::kw::Empty;
+use rustc_span::{sym, Span};
use rustc_middle::query::Providers;
@@ -157,7 +158,14 @@ impl<'tcx> LanguageItemCollector<'tcx> {
self.tcx.hir().get_by_def_id(item_def_id)
{
let (actual_num, generics_span) = match kind.generics() {
- Some(generics) => (generics.params.len(), generics.span),
+ Some(generics) => (
+ generics
+ .params
+ .iter()
+ .filter(|p| !self.tcx.has_attr(p.def_id, sym::rustc_host))
+ .count(),
+ generics.span,
+ ),
None => (0, *item_span),
};
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index a7a8af864..e195f9ab6 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -2,33 +2,76 @@ use rustc_ast::Attribute;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::LocalDefId;
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
use rustc_span::source_map::Spanned;
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_target::abi::{HasDataLayout, TargetDataLayout};
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
+use rustc_trait_selection::{infer::TyCtxtInferExt, traits};
-use crate::errors::{Abi, Align, HomogeneousAggregate, LayoutOf, Size, UnrecognizedField};
+use crate::errors::{
+ LayoutAbi, LayoutAlign, LayoutHomogeneousAggregate, LayoutInvalidAttribute, LayoutOf,
+ LayoutSize, UnrecognizedField,
+};
pub fn test_layout(tcx: TyCtxt<'_>) {
- if tcx.features().rustc_attrs {
+ if !tcx.features().rustc_attrs {
// if the `rustc_attrs` feature is not enabled, don't bother testing layout
- for id in tcx.hir().items() {
- if matches!(
- tcx.def_kind(id.owner_id),
- DefKind::TyAlias { .. } | DefKind::Enum | DefKind::Struct | DefKind::Union
- ) {
- for attr in tcx.get_attrs(id.owner_id, sym::rustc_layout) {
- dump_layout_of(tcx, id.owner_id.def_id, attr);
+ return;
+ }
+ for id in tcx.hir_crate_items(()).definitions() {
+ for attr in tcx.get_attrs(id, sym::rustc_layout) {
+ match tcx.def_kind(id) {
+ DefKind::TyAlias | DefKind::Enum | DefKind::Struct | DefKind::Union => {
+ dump_layout_of(tcx, id, attr);
+ }
+ _ => {
+ tcx.sess.emit_err(LayoutInvalidAttribute { span: tcx.def_span(id) });
}
}
}
}
}
+pub fn ensure_wf<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+) -> bool {
+ let pred = ty::ClauseKind::WellFormed(ty.into());
+ let obligation = traits::Obligation::new(
+ tcx,
+ traits::ObligationCause::new(
+ span,
+ def_id,
+ traits::ObligationCauseCode::WellFormed(Some(traits::WellFormedLoc::Ty(def_id))),
+ ),
+ param_env,
+ pred,
+ );
+ let infcx = tcx.infer_ctxt().build();
+ let ocx = traits::ObligationCtxt::new(&infcx);
+ ocx.register_obligation(obligation);
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.err_ctxt().report_fulfillment_errors(&errors);
+ false
+ } else {
+ // looks WF!
+ true
+ }
+}
+
fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
let param_env = tcx.param_env(item_def_id);
let ty = tcx.type_of(item_def_id).instantiate_identity();
+ let span = tcx.def_span(item_def_id.to_def_id());
+ if !ensure_wf(tcx, param_env, ty, item_def_id, span) {
+ return;
+ }
match tcx.layout_of(param_env.and(ty)) {
Ok(ty_layout) => {
// Check out the `#[rustc_layout(..)]` attribute to tell what to dump.
@@ -37,29 +80,24 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
for meta_item in meta_items {
match meta_item.name_or_empty() {
sym::abi => {
- tcx.sess.emit_err(Abi {
- span: tcx.def_span(item_def_id.to_def_id()),
- abi: format!("{:?}", ty_layout.abi),
- });
+ tcx.sess.emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
}
sym::align => {
- tcx.sess.emit_err(Align {
- span: tcx.def_span(item_def_id.to_def_id()),
+ tcx.sess.emit_err(LayoutAlign {
+ span,
align: format!("{:?}", ty_layout.align),
});
}
sym::size => {
- tcx.sess.emit_err(Size {
- span: tcx.def_span(item_def_id.to_def_id()),
- size: format!("{:?}", ty_layout.size),
- });
+ tcx.sess
+ .emit_err(LayoutSize { span, size: format!("{:?}", ty_layout.size) });
}
sym::homogeneous_aggregate => {
- tcx.sess.emit_err(HomogeneousAggregate {
- span: tcx.def_span(item_def_id.to_def_id()),
+ tcx.sess.emit_err(LayoutHomogeneousAggregate {
+ span,
homogeneous_aggregate: format!(
"{:?}",
ty_layout.homogeneous_aggregate(&UnwrapLayoutCx { tcx, param_env })
@@ -69,18 +107,15 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
sym::debug => {
let normalized_ty = format!(
- "{:?}",
+ "{}",
tcx.normalize_erasing_regions(
param_env.with_reveal_all_normalized(tcx),
ty,
)
);
+ // FIXME: using the `Debug` impl here isn't ideal.
let ty_layout = format!("{:#?}", *ty_layout);
- tcx.sess.emit_err(LayoutOf {
- span: tcx.def_span(item_def_id.to_def_id()),
- normalized_ty,
- ty_layout,
- });
+ tcx.sess.emit_err(LayoutOf { span, normalized_ty, ty_layout });
}
name => {
@@ -91,11 +126,7 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
}
Err(layout_error) => {
- tcx.sess.emit_fatal(Spanned {
- node: layout_error.into_diagnostic(),
-
- span: tcx.def_span(item_def_id.to_def_id()),
- });
+ tcx.sess.emit_fatal(Spanned { node: layout_error.into_diagnostic(), span });
}
}
}
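`layout_test.rs` now walks all crate definitions, rejects `#[rustc_layout]` on unsupported items (`LayoutInvalidAttribute`), and checks well-formedness (`ensure_wf`) before calling `layout_of`. A sketch of the attribute as UI tests use it (nightly plus `rustc_attrs`; the dump is emitted as errors, so compilation is expected to fail):

```rust
#![feature(rustc_attrs)]
#![allow(dead_code)]

// Reported through the renamed LayoutSize/LayoutAlign/LayoutAbi diagnostics.
#[rustc_layout(size, align, abi)]
struct Pair(u32, u16);

fn main() {
    let _ = Pair(1, 2);
}
```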
diff --git a/compiler/rustc_passes/src/lib.rs b/compiler/rustc_passes/src/lib.rs
index 0da4b2946..51f3c9ad7 100644
--- a/compiler/rustc_passes/src/lib.rs
+++ b/compiler/rustc_passes/src/lib.rs
@@ -24,6 +24,7 @@ use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
use rustc_fluent_macro::fluent_messages;
use rustc_middle::query::Providers;
+pub mod abi_test;
mod check_attr;
mod check_const;
pub mod dead;
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
index e62833b35..1239d6d91 100644
--- a/compiler/rustc_passes/src/reachable.rs
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -90,6 +90,10 @@ impl<'tcx> Visitor<'tcx> for ReachableContext<'tcx> {
.typeck_results()
.type_dependent_def(expr.hir_id)
.map(|(kind, def_id)| Res::Def(kind, def_id)),
+ hir::ExprKind::Closure(&hir::Closure { def_id, .. }) => {
+ self.reachable_symbols.insert(def_id);
+ None
+ }
_ => None,
};
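The `reachable.rs` hunk records closures found in reachable bodies as reachable themselves, so their MIR remains available for cross-crate codegen. A minimal illustrative sketch of the shape this affects (the cross-crate framing is an assumption, not stated in the diff):

```rust
// The closure's DefId is now inserted into `reachable_symbols` because the
// enclosing inlinable function is reachable.
#[inline]
pub fn make_adder(n: i32) -> impl Fn(i32) -> i32 {
    move |x| x + n
}

fn main() {
    assert_eq!(make_adder(2)(3), 5);
}
```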
diff --git a/compiler/rustc_privacy/messages.ftl b/compiler/rustc_privacy/messages.ftl
index b91e0d18a..7785f1a7f 100644
--- a/compiler/rustc_privacy/messages.ftl
+++ b/compiler/rustc_privacy/messages.ftl
@@ -11,11 +11,6 @@ privacy_in_public_interface = {$vis_descr} {$kind} `{$descr}` in public interfac
privacy_item_is_private = {$kind} `{$descr}` is private
.label = private {$kind}
-privacy_private_in_public_lint =
- {$vis_descr} {$kind} `{$descr}` in public interface (error {$kind ->
- [trait] E0445
- *[other] E0446
- })
privacy_private_interface_or_bounds_lint = {$ty_kind} `{$ty_descr}` is more private than the item `{$item_descr}`
.item_label = {$item_kind} `{$item_descr}` is reachable at visibility `{$item_vis_descr}`
diff --git a/compiler/rustc_privacy/src/errors.rs b/compiler/rustc_privacy/src/errors.rs
index da18f0c82..b1242f82f 100644
--- a/compiler/rustc_privacy/src/errors.rs
+++ b/compiler/rustc_privacy/src/errors.rs
@@ -47,21 +47,6 @@ pub struct UnnamedItemIsPrivate {
pub kind: &'static str,
}
-// Duplicate of `InPublicInterface` but with a different error code, shares the same slug.
-#[derive(Diagnostic)]
-#[diag(privacy_in_public_interface, code = "E0445")]
-pub struct InPublicInterfaceTraits<'a> {
- #[primary_span]
- #[label]
- pub span: Span,
- pub vis_descr: &'static str,
- pub kind: &'a str,
- pub descr: DiagnosticArgFromDisplay<'a>,
- #[label(privacy_visibility_label)]
- pub vis_span: Span,
-}
-
-// Duplicate of `InPublicInterfaceTraits` but with a different error code, shares the same slug.
#[derive(Diagnostic)]
#[diag(privacy_in_public_interface, code = "E0446")]
pub struct InPublicInterface<'a> {
@@ -92,14 +77,6 @@ pub struct FromPrivateDependencyInPublicInterface<'a> {
}
#[derive(LintDiagnostic)]
-#[diag(privacy_private_in_public_lint)]
-pub struct PrivateInPublicLint<'a> {
- pub vis_descr: &'static str,
- pub kind: &'a str,
- pub descr: DiagnosticArgFromDisplay<'a>,
-}
-
-#[derive(LintDiagnostic)]
#[diag(privacy_unnameable_types_lint)]
pub struct UnnameableTypesLint<'a> {
#[label]
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
index 0eb344ba6..ab85f680f 100644
--- a/compiler/rustc_privacy/src/lib.rs
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -22,7 +22,7 @@ use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId, CRATE_DEF_ID};
use rustc_hir::intravisit::{self, Visitor};
-use rustc_hir::{AssocItemKind, ForeignItemKind, HirIdSet, ItemId, Node, PatKind};
+use rustc_hir::{AssocItemKind, ForeignItemKind, ItemId, Node, PatKind};
use rustc_middle::bug;
use rustc_middle::hir::nested_filter;
use rustc_middle::middle::privacy::{EffectiveVisibilities, EffectiveVisibility, Level};
@@ -42,8 +42,8 @@ use std::{fmt, mem};
use errors::{
FieldIsPrivate, FieldIsPrivateLabel, FromPrivateDependencyInPublicInterface, InPublicInterface,
- InPublicInterfaceTraits, ItemIsPrivate, PrivateInPublicLint, PrivateInterfacesOrBoundsLint,
- ReportEffectiveVisibility, UnnameableTypesLint, UnnamedItemIsPrivate,
+ ItemIsPrivate, PrivateInterfacesOrBoundsLint, ReportEffectiveVisibility, UnnameableTypesLint,
+ UnnamedItemIsPrivate,
};
fluent_messages! { "../messages.ftl" }
@@ -291,8 +291,7 @@ where
| ty::Param(..)
| ty::Bound(..)
| ty::Error(_)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..) => {}
+ | ty::GeneratorWitness(..) => {}
ty::Placeholder(..) | ty::Infer(..) => {
bug!("unexpected type: {:?}", ty)
}
@@ -364,6 +363,7 @@ trait VisibilityLike: Sized {
find.min
}
}
+
impl VisibilityLike for ty::Visibility {
const MAX: Self = ty::Visibility::Public;
fn new_min<const SHALLOW: bool>(
@@ -588,7 +588,7 @@ impl<'tcx> EmbargoVisitor<'tcx> {
self.update(def_id, macro_ev, Level::Reachable);
match def_kind {
// No type privacy, so can be directly marked as reachable.
- DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias { .. } => {
+ DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias => {
if vis.is_accessible_from(module, self.tcx) {
self.update(def_id, macro_ev, Level::Reachable);
}
@@ -835,7 +835,7 @@ impl ReachEverythingInTheInterfaceVisitor<'_, '_> {
self.visit(self.ev.tcx.type_of(param.def_id).instantiate_identity());
}
}
- GenericParamDefKind::Const { has_default } => {
+ GenericParamDefKind::Const { has_default, .. } => {
self.visit(self.ev.tcx.type_of(param.def_id).instantiate_identity());
if has_default {
self.visit(
@@ -1383,345 +1383,6 @@ impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> {
}
///////////////////////////////////////////////////////////////////////////////
-/// Obsolete visitors for checking for private items in public interfaces.
-/// These visitors are supposed to be kept in frozen state and produce an
-/// "old error node set". For backward compatibility the new visitor reports
-/// warnings instead of hard errors when the erroneous node is not in this old set.
-///////////////////////////////////////////////////////////////////////////////
-
-struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
- tcx: TyCtxt<'tcx>,
- effective_visibilities: &'a EffectiveVisibilities,
- in_variant: bool,
- // Set of errors produced by this obsolete visitor.
- old_error_set: HirIdSet,
-}
-
-struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
- inner: &'a ObsoleteVisiblePrivateTypesVisitor<'b, 'tcx>,
- /// Whether the type refers to private types.
- contains_private: bool,
- /// Whether we've recurred at all (i.e., if we're pointing at the
- /// first type on which `visit_ty` was called).
- at_outer_type: bool,
- /// Whether that first type is a public path.
- outer_type_is_public_path: bool,
-}
-
-impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
- fn path_is_private_type(&self, path: &hir::Path<'_>) -> bool {
- let did = match path.res {
- Res::PrimTy(..) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. } | Res::Err => {
- return false;
- }
- res => res.def_id(),
- };
-
- // A path can only be private if:
- // it's in this crate...
- if let Some(did) = did.as_local() {
- // .. and it corresponds to a private type in the AST (this returns
- // `None` for type parameters).
- match self.tcx.hir().find(self.tcx.hir().local_def_id_to_hir_id(did)) {
- Some(Node::Item(_)) => !self.tcx.visibility(did).is_public(),
- Some(_) | None => false,
- }
- } else {
- false
- }
- }
-
- fn trait_is_public(&self, trait_id: LocalDefId) -> bool {
- // FIXME: this would preferably be using `exported_items`, but all
- // traits are exported currently (see `EmbargoVisitor.exported_trait`).
- self.effective_visibilities.is_directly_public(trait_id)
- }
-
- fn check_generic_bound(&mut self, bound: &hir::GenericBound<'_>) {
- if let hir::GenericBound::Trait(ref trait_ref, _) = *bound {
- if self.path_is_private_type(trait_ref.trait_ref.path) {
- self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id);
- }
- }
- }
-
- fn item_is_public(&self, def_id: LocalDefId) -> bool {
- self.effective_visibilities.is_reachable(def_id) || self.tcx.visibility(def_id).is_public()
- }
-}
-
-impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
- fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) {
- match generic_arg {
- hir::GenericArg::Type(t) => self.visit_ty(t),
- hir::GenericArg::Infer(inf) => self.visit_ty(&inf.to_ty()),
- hir::GenericArg::Lifetime(_) | hir::GenericArg::Const(_) => {}
- }
- }
-
- fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
- if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = ty.kind {
- if self.inner.path_is_private_type(path) {
- self.contains_private = true;
- // Found what we're looking for, so let's stop working.
- return;
- }
- }
- if let hir::TyKind::Path(_) = ty.kind {
- if self.at_outer_type {
- self.outer_type_is_public_path = true;
- }
- }
- self.at_outer_type = false;
- intravisit::walk_ty(self, ty)
- }
-
- // Don't want to recurse into `[, .. expr]`.
- fn visit_expr(&mut self, _: &hir::Expr<'_>) {}
-}
-
-impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
- type NestedFilter = nested_filter::All;
-
- /// We want to visit items in the context of their containing
- /// module and so forth, so supply a crate for doing a deep walk.
- fn nested_visit_map(&mut self) -> Self::Map {
- self.tcx.hir()
- }
-
- fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
- match item.kind {
- // Contents of a private mod can be re-exported, so we need
- // to check internals.
- hir::ItemKind::Mod(_) => {}
-
- // An `extern {}` doesn't introduce a new privacy
- // namespace (the contents have their own privacies).
- hir::ItemKind::ForeignMod { .. } => {}
-
- hir::ItemKind::Trait(.., bounds, _) => {
- if !self.trait_is_public(item.owner_id.def_id) {
- return;
- }
-
- for bound in bounds.iter() {
- self.check_generic_bound(bound)
- }
- }
-
- // Impls need some special handling to try to offer useful
- // error messages without (too many) false positives
- // (i.e., we could just return here to not check them at
- // all, or some worse estimation of whether an impl is
- // publicly visible).
- hir::ItemKind::Impl(ref impl_) => {
- // `impl [... for] Private` is never visible.
- let self_contains_private;
- // `impl [... for] Public<...>`, but not `impl [... for]
- // Vec<Public>` or `(Public,)`, etc.
- let self_is_public_path;
-
- // Check the properties of the `Self` type:
- {
- let mut visitor = ObsoleteCheckTypeForPrivatenessVisitor {
- inner: self,
- contains_private: false,
- at_outer_type: true,
- outer_type_is_public_path: false,
- };
- visitor.visit_ty(impl_.self_ty);
- self_contains_private = visitor.contains_private;
- self_is_public_path = visitor.outer_type_is_public_path;
- }
-
- // Miscellaneous info about the impl:
-
- // `true` iff this is `impl Private for ...`.
- let not_private_trait = impl_.of_trait.as_ref().map_or(
- true, // no trait counts as public trait
- |tr| {
- if let Some(def_id) = tr.path.res.def_id().as_local() {
- self.trait_is_public(def_id)
- } else {
- true // external traits must be public
- }
- },
- );
-
- // `true` iff this is a trait impl or at least one method is public.
- //
- // `impl Public { $( fn ...() {} )* }` is not visible.
- //
- // This is required over just using the methods' privacy
- // directly because we might have `impl<T: Foo<Private>> ...`,
- // and we shouldn't warn about the generics if all the methods
- // are private (because `T` won't be visible externally).
- let trait_or_some_public_method = impl_.of_trait.is_some()
- || impl_.items.iter().any(|impl_item_ref| {
- let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
- match impl_item.kind {
- hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => self
- .effective_visibilities
- .is_reachable(impl_item_ref.id.owner_id.def_id),
- hir::ImplItemKind::Type(_) => false,
- }
- });
-
- if !self_contains_private && not_private_trait && trait_or_some_public_method {
- intravisit::walk_generics(self, &impl_.generics);
-
- match impl_.of_trait {
- None => {
- for impl_item_ref in impl_.items {
- // This is where we choose whether to walk down
- // further into the impl to check its items. We
- // should only walk into public items so that we
- // don't erroneously report errors for private
- // types in private items.
- let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
- match impl_item.kind {
- hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..)
- if self.item_is_public(impl_item.owner_id.def_id) =>
- {
- intravisit::walk_impl_item(self, impl_item)
- }
- hir::ImplItemKind::Type(..) => {
- intravisit::walk_impl_item(self, impl_item)
- }
- _ => {}
- }
- }
- }
- Some(ref tr) => {
- // Any private types in a trait impl fall into three
- // categories.
- // 1. mentioned in the trait definition
- // 2. mentioned in the type params/generics
- // 3. mentioned in the associated types of the impl
- //
- // Those in 1. can only occur if the trait is in
- // this crate and will have been warned about on the
- // trait definition (there's no need to warn twice
- // so we don't check the methods).
- //
- // Those in 2. are warned via walk_generics and this
- // call here.
- intravisit::walk_path(self, tr.path);
-
- // Those in 3. are warned with this call.
- for impl_item_ref in impl_.items {
- let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
- if let hir::ImplItemKind::Type(ty) = impl_item.kind {
- self.visit_ty(ty);
- }
- }
- }
- }
- } else if impl_.of_trait.is_none() && self_is_public_path {
- // `impl Public<Private> { ... }`. Any public static
- // methods will be visible as `Public::foo`.
- let mut found_pub_static = false;
- for impl_item_ref in impl_.items {
- if self
- .effective_visibilities
- .is_reachable(impl_item_ref.id.owner_id.def_id)
- || self.tcx.visibility(impl_item_ref.id.owner_id).is_public()
- {
- let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
- match impl_item_ref.kind {
- AssocItemKind::Const => {
- found_pub_static = true;
- intravisit::walk_impl_item(self, impl_item);
- }
- AssocItemKind::Fn { has_self: false } => {
- found_pub_static = true;
- intravisit::walk_impl_item(self, impl_item);
- }
- _ => {}
- }
- }
- }
- if found_pub_static {
- intravisit::walk_generics(self, &impl_.generics)
- }
- }
- return;
- }
-
- // `type ... = ...;` can contain private types, because
- // we're introducing a new name.
- hir::ItemKind::TyAlias(..) => return,
-
- // Not at all public, so we don't care.
- _ if !self.item_is_public(item.owner_id.def_id) => {
- return;
- }
-
- _ => {}
- }
-
- // We've carefully constructed it so that if we're here, then
- // any `visit_ty`'s will be called on things that are in
- // public signatures, i.e., things that we're interested in for
- // this visitor.
- intravisit::walk_item(self, item);
- }
-
- fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
- for predicate in generics.predicates {
- match predicate {
- hir::WherePredicate::BoundPredicate(bound_pred) => {
- for bound in bound_pred.bounds.iter() {
- self.check_generic_bound(bound)
- }
- }
- hir::WherePredicate::RegionPredicate(_) => {}
- hir::WherePredicate::EqPredicate(eq_pred) => {
- self.visit_ty(eq_pred.rhs_ty);
- }
- }
- }
- }
-
- fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
- if self.effective_visibilities.is_reachable(item.owner_id.def_id) {
- intravisit::walk_foreign_item(self, item)
- }
- }
-
- fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
- if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = t.kind {
- if self.path_is_private_type(path) {
- self.old_error_set.insert(t.hir_id);
- }
- }
- intravisit::walk_ty(self, t)
- }
-
- fn visit_variant(&mut self, v: &'tcx hir::Variant<'tcx>) {
- if self.effective_visibilities.is_reachable(v.def_id) {
- self.in_variant = true;
- intravisit::walk_variant(self, v);
- self.in_variant = false;
- }
- }
-
- fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
- let vis = self.tcx.visibility(s.def_id);
- if vis.is_public() || self.in_variant {
- intravisit::walk_field_def(self, s);
- }
- }
-
- // We don't need to introspect into these at all: an
- // expression/block context can't possibly contain exported things.
- // (Making them no-ops stops us from traversing the whole AST without
- // having to be super careful about our `walk_...` calls above.)
- fn visit_block(&mut self, _: &'tcx hir::Block<'tcx>) {}
- fn visit_expr(&mut self, _: &'tcx hir::Expr<'tcx>) {}
-}
-
-///////////////////////////////////////////////////////////////////////////////
/// SearchInterfaceForPrivateItemsVisitor traverses an item's interface and
/// finds any private components in it.
/// PrivateItemsInPublicInterfacesVisitor ensures there are no private types
@@ -1734,7 +1395,6 @@ struct SearchInterfaceForPrivateItemsVisitor<'tcx> {
/// The visitor checks that each component type is at least this visible.
required_visibility: ty::Visibility,
required_effective_vis: Option<EffectiveVisibility>,
- has_old_errors: bool,
in_assoc_ty: bool,
in_primary_interface: bool,
}
@@ -1802,14 +1462,15 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
};
let vis = self.tcx.local_visibility(local_def_id);
- let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
let span = self.tcx.def_span(self.item_def_id.to_def_id());
let vis_span = self.tcx.def_span(def_id);
- if !vis.is_at_least(self.required_visibility, self.tcx) {
+ if self.in_assoc_ty && !vis.is_at_least(self.required_visibility, self.tcx) {
let vis_descr = match vis {
ty::Visibility::Public => "public",
ty::Visibility::Restricted(vis_def_id) => {
- if vis_def_id == self.tcx.parent_module(hir_id).to_local_def_id() {
+ if vis_def_id
+ == self.tcx.parent_module_from_def_id(local_def_id).to_local_def_id()
+ {
"private"
} else if vis_def_id.is_top_level_module() {
"crate-private"
@@ -1819,35 +1480,14 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
}
};
- if self.has_old_errors
- || self.in_assoc_ty
- || self.tcx.resolutions(()).has_pub_restricted
- {
- if kind == "trait" {
- self.tcx.sess.emit_err(InPublicInterfaceTraits {
- span,
- vis_descr,
- kind,
- descr: descr.into(),
- vis_span,
- });
- } else {
- self.tcx.sess.emit_err(InPublicInterface {
- span,
- vis_descr,
- kind,
- descr: descr.into(),
- vis_span,
- });
- }
- } else {
- self.tcx.emit_spanned_lint(
- lint::builtin::PRIVATE_IN_PUBLIC,
- hir_id,
- span,
- PrivateInPublicLint { vis_descr, kind, descr: descr.into() },
- );
- }
+ self.tcx.sess.emit_err(InPublicInterface {
+ span,
+ vis_descr,
+ kind,
+ descr: descr.into(),
+ vis_span,
+ });
+ return false;
}
let Some(effective_vis) = self.required_effective_vis else {
@@ -1864,7 +1504,7 @@ impl SearchInterfaceForPrivateItemsVisitor<'_> {
};
self.tcx.emit_spanned_lint(
lint,
- hir_id,
+ self.tcx.hir().local_def_id_to_hir_id(self.item_def_id),
span,
PrivateInterfacesOrBoundsLint {
item_span: span,
@@ -1918,7 +1558,6 @@ impl<'tcx> DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> {
struct PrivateItemsInPublicInterfacesChecker<'tcx, 'a> {
tcx: TyCtxt<'tcx>,
- old_error_set_ancestry: HirIdSet,
effective_visibilities: &'a EffectiveVisibilities,
}
@@ -1934,9 +1573,6 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> {
item_def_id: def_id,
required_visibility,
required_effective_vis,
- has_old_errors: self
- .old_error_set_ancestry
- .contains(&self.tcx.hir().local_def_id_to_hir_id(def_id)),
in_assoc_ty: false,
in_primary_interface: true,
}
@@ -2001,8 +1637,8 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx, '_> {
let def_kind = tcx.def_kind(def_id);
match def_kind {
- DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias { .. } => {
- if let DefKind::TyAlias { .. } = def_kind {
+ DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias => {
+ if let DefKind::TyAlias = def_kind {
self.check_unnameable(def_id, effective_vis);
}
self.check(def_id, item_visibility, effective_vis).generics().predicates().ty();
@@ -2298,35 +1934,8 @@ fn effective_visibilities(tcx: TyCtxt<'_>, (): ()) -> &EffectiveVisibilities {
fn check_private_in_public(tcx: TyCtxt<'_>, (): ()) {
let effective_visibilities = tcx.effective_visibilities(());
-
- let mut visitor = ObsoleteVisiblePrivateTypesVisitor {
- tcx,
- effective_visibilities,
- in_variant: false,
- old_error_set: Default::default(),
- };
- tcx.hir().walk_toplevel_module(&mut visitor);
-
- let mut old_error_set_ancestry = HirIdSet::default();
- for mut id in visitor.old_error_set.iter().copied() {
- loop {
- if !old_error_set_ancestry.insert(id) {
- break;
- }
- let parent = tcx.hir().parent_id(id);
- if parent == id {
- break;
- }
- id = parent;
- }
- }
-
- // Check for private types and traits in public interfaces.
- let mut checker = PrivateItemsInPublicInterfacesChecker {
- tcx,
- old_error_set_ancestry,
- effective_visibilities,
- };
+ // Check for private types in public interfaces.
+ let mut checker = PrivateItemsInPublicInterfacesChecker { tcx, effective_visibilities };
for id in tcx.hir().items() {
checker.check_item(id);
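
Editor's note: the hunks above fold the old two-pass private-in-public machinery into a single checker — the `PRIVATE_IN_PUBLIC` lint path and the visitor feeding it are removed, the hard `InPublicInterface` error is now emitted only in the `in_assoc_ty` case, and the remaining cases go through the effective-visibility `PrivateInterfacesOrBoundsLint`. As a hedged illustration of the kind of user code the surviving hard-error path is about (ordinary crate code, not compiler internals, and intentionally rejected by the compiler):

```rust
struct Priv;

pub trait Assoc {
    type Out;
}

pub struct Wrapper;

impl Assoc for Wrapper {
    // The `in_assoc_ty` case: a private type leaking through the associated
    // type of a public trait impl. Roughly this shape still trips the hard
    // "private type `Priv` in public interface" error after this change,
    // while other leaks are reported through the new lints instead.
    type Out = Priv;
}

fn main() {}
```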
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index ac697a3ae..a44dd5ede 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -9,7 +9,6 @@ edition = "2021"
[dependencies]
field-offset = "0.3.5"
measureme = "10.0.0"
-rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_hir = { path = "../rustc_hir" }
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index 53005ede8..30621a135 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -11,7 +11,7 @@
#![allow(rustc::potential_query_instability, unused_parens)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_middle;
@@ -41,7 +41,7 @@ use rustc_query_system::query::{
};
use rustc_query_system::HandleCycleError;
use rustc_query_system::Value;
-use rustc_span::Span;
+use rustc_span::{ErrorGuaranteed, Span};
#[macro_use]
mod plumbing;
@@ -92,7 +92,7 @@ where
}
#[inline(always)]
- fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, DepKind>
+ fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key>
where
QueryCtxt<'tcx>: 'a,
{
@@ -145,9 +145,10 @@ where
fn value_from_cycle_error(
self,
tcx: TyCtxt<'tcx>,
- cycle: &[QueryInfo<DepKind>],
+ cycle: &[QueryInfo],
+ guar: ErrorGuaranteed,
) -> Self::Value {
- (self.dynamic.value_from_cycle_error)(tcx, cycle)
+ (self.dynamic.value_from_cycle_error)(tcx, cycle, guar)
}
#[inline(always)]
@@ -197,6 +198,8 @@ trait QueryConfigRestored<'tcx> {
type RestoredValue;
type Config: QueryConfig<QueryCtxt<'tcx>>;
+ const NAME: &'static &'static str;
+
fn config(tcx: TyCtxt<'tcx>) -> Self::Config;
fn restore(value: <Self::Config as QueryConfig<QueryCtxt<'tcx>>>::Value)
-> Self::RestoredValue;
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index def6ac280..4516708ce 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -8,7 +8,9 @@ use crate::QueryConfigRestored;
use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
use rustc_data_structures::sync::Lock;
use rustc_errors::Diagnostic;
+
use rustc_index::Idx;
+use rustc_middle::dep_graph::dep_kinds;
use rustc_middle::dep_graph::{
self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex,
};
@@ -53,7 +55,7 @@ impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
}
impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
- type DepKind = rustc_middle::dep_graph::DepKind;
+ type Deps = rustc_middle::dep_graph::DepsType;
type DepContext = TyCtxt<'tcx>;
#[inline]
@@ -78,7 +80,7 @@ impl QueryContext for QueryCtxt<'_> {
tls::with_related_context(self.tcx, |icx| icx.query)
}
- fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> {
+ fn try_collect_active_jobs(self) -> Option<QueryMap> {
let mut jobs = QueryMap::default();
for collect in super::TRY_COLLECT_ACTIVE_JOBS.iter() {
@@ -154,7 +156,7 @@ impl QueryContext for QueryCtxt<'_> {
let mut span = None;
let mut layout_of_depth = None;
if let Some(map) = self.try_collect_active_jobs() {
- if let Some((info, depth)) = job.try_find_layout_root(map) {
+ if let Some((info, depth)) = job.try_find_layout_root(map, dep_kinds::layout_of) {
span = Some(info.job.span);
layout_of_depth = Some(LayoutOfDepth { desc: info.query.description, depth });
}
@@ -300,7 +302,7 @@ pub(crate) fn create_query_frame<
key: K,
kind: DepKind,
name: &'static str,
-) -> QueryStackFrame<DepKind> {
+) -> QueryStackFrame {
// Avoid calling queries while formatting the description
let description = ty::print::with_no_queries!(
// Disable visible paths printing for performance reasons.
@@ -312,7 +314,7 @@ pub(crate) fn create_query_frame<
);
let description =
if tcx.sess.verbose() { format!("{description} [{name:?}]") } else { description };
- let span = if kind == dep_graph::DepKind::def_span || with_no_queries() {
+ let span = if kind == dep_graph::dep_kinds::def_span || with_no_queries() {
// The `def_span` query is used to calculate `default_span`,
// so exit to avoid infinite recursion.
None
@@ -320,7 +322,7 @@ pub(crate) fn create_query_frame<
Some(key.default_span(tcx))
};
let def_id = key.key_as_def_id();
- let def_kind = if kind == dep_graph::DepKind::opt_def_kind || with_no_queries() {
+ let def_kind = if kind == dep_graph::dep_kinds::opt_def_kind || with_no_queries() {
// Try to avoid infinite recursion.
None
} else {
@@ -329,7 +331,7 @@ pub(crate) fn create_query_frame<
let hash = || {
tcx.with_stable_hashing_context(|mut hcx| {
let mut hasher = StableHasher::new();
- std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
+ kind.as_usize().hash_stable(&mut hcx, &mut hasher);
key.hash_stable(&mut hcx, &mut hasher);
hasher.finish::<Hash64>()
})
@@ -348,8 +350,7 @@ pub(crate) fn encode_query_results<'a, 'tcx, Q>(
Q: super::QueryConfigRestored<'tcx>,
Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>,
{
- let _timer =
- qcx.profiler().verbose_generic_activity_with_arg("encode_query_results_for", query.name());
+ let _timer = qcx.profiler().generic_activity_with_arg("encode_query_results_for", query.name());
assert!(query.query_state(qcx).all_inactive());
let cache = query.query_cache(qcx);
@@ -431,8 +432,8 @@ where
// hit the cache instead of having to go through `force_from_dep_node`.
// This assertion makes sure, we actually keep applying the solution above.
debug_assert!(
- dep_node.kind != DepKind::codegen_unit,
- "calling force_from_dep_node() on DepKind::codegen_unit"
+ dep_node.kind != dep_kinds::codegen_unit,
+ "calling force_from_dep_node() on dep_kinds::codegen_unit"
);
if let Some(key) = Q::Key::recover(tcx, &dep_node) {
@@ -458,6 +459,7 @@ where
fingerprint_style,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
+ name: Q::NAME,
};
}
@@ -471,6 +473,7 @@ where
try_load_from_on_disk_cache: Some(|tcx, dep_node| {
try_load_from_on_disk_cache(Q::config(tcx), tcx, dep_node)
}),
+ name: Q::NAME,
}
}
@@ -566,7 +569,7 @@ macro_rules! define_queries {
DynamicQuery {
name: stringify!($name),
eval_always: is_eval_always!([$($modifiers)*]),
- dep_kind: dep_graph::DepKind::$name,
+ dep_kind: dep_graph::dep_kinds::$name,
handle_cycle_error: handle_cycle_error!([$($modifiers)*]),
query_state: offset_of!(QueryStates<'tcx> => $name),
query_cache: offset_of!(QueryCaches<'tcx> => $name),
@@ -605,8 +608,8 @@ macro_rules! define_queries {
} {
|_tcx, _key, _prev_index, _index| None
}),
- value_from_cycle_error: |tcx, cycle| {
- let result: queries::$name::Value<'tcx> = Value::from_cycle_error(tcx, cycle);
+ value_from_cycle_error: |tcx, cycle, guar| {
+ let result: queries::$name::Value<'tcx> = Value::from_cycle_error(tcx, cycle, guar);
erase(result)
},
loadable_from_disk: |_tcx, _key, _index| {
@@ -637,6 +640,8 @@ macro_rules! define_queries {
{ feedable!([$($modifiers)*]) },
>;
+ const NAME: &'static &'static str = &stringify!($name);
+
#[inline(always)]
fn config(tcx: TyCtxt<'tcx>) -> Self::Config {
DynamicConfig {
@@ -650,9 +655,9 @@ macro_rules! define_queries {
}
}
- pub fn try_collect_active_jobs<'tcx>(tcx: TyCtxt<'tcx>, qmap: &mut QueryMap<DepKind>) {
+ pub fn try_collect_active_jobs<'tcx>(tcx: TyCtxt<'tcx>, qmap: &mut QueryMap) {
let make_query = |tcx, key| {
- let kind = rustc_middle::dep_graph::DepKind::$name;
+ let kind = rustc_middle::dep_graph::dep_kinds::$name;
let name = stringify!($name);
$crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name)
};
@@ -710,7 +715,7 @@ macro_rules! define_queries {
// These arrays are used for iteration and can't be indexed by `DepKind`.
- const TRY_COLLECT_ACTIVE_JOBS: &[for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<DepKind>)] =
+ const TRY_COLLECT_ACTIVE_JOBS: &[for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap)] =
&[$(query_impl::$name::try_collect_active_jobs),*];
const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[
@@ -738,6 +743,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
try_load_from_on_disk_cache: None,
+ name: &"Null",
}
}
@@ -749,6 +755,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
try_load_from_on_disk_cache: None,
+ name: &"Red",
}
}
@@ -759,6 +766,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Unit,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
+ name: &"TraitSelect",
}
}
@@ -769,6 +777,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
+ name: &"CompileCodegenUnit",
}
}
@@ -779,6 +788,7 @@ macro_rules! define_queries {
fingerprint_style: FingerprintStyle::Opaque,
force_from_dep_node: None,
try_load_from_on_disk_cache: None,
+ name: &"CompileMonoItem",
}
}
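
Editor's note: the plumbing changes above replace uses of generated enum variants such as `DepKind::def_span` with constants in a `dep_kinds` module, and hash the kind via `as_usize()` rather than `mem::discriminant`. A minimal, self-contained sketch of that constants-module pattern (the constant names and indices are invented here; the real ones are produced by the query macros):

```rust
/// Stand-in for the new index-based `DepKind`: a plain `u16` wrapper
/// instead of a macro-generated enum.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct DepKind {
    variant: u16,
}

impl DepKind {
    pub const fn new(variant: u16) -> Self {
        Self { variant }
    }
    pub const fn as_usize(self) -> usize {
        self.variant as usize
    }
}

/// Stand-in for the macro-generated `dep_kinds` module: one constant per query.
#[allow(non_upper_case_globals)]
pub mod dep_kinds {
    use super::DepKind;
    pub const def_span: DepKind = DepKind::new(0);
    pub const opt_def_kind: DepKind = DepKind::new(1);
    pub const layout_of: DepKind = DepKind::new(2);
}

fn main() {
    // Comparisons go through constants rather than enum variants...
    let kind = dep_kinds::def_span;
    assert!(kind == dep_kinds::def_span && kind != dep_kinds::layout_of);
    // ...and hashing feeds the raw index instead of `mem::discriminant(&kind)`.
    for k in [dep_kinds::def_span, dep_kinds::opt_def_kind, dep_kinds::layout_of] {
        println!("stable-hash input: {}", k.as_usize());
    }
}
```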
diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs
index c2c9600f5..103a6c01b 100644
--- a/compiler/rustc_query_system/src/dep_graph/debug.rs
+++ b/compiler/rustc_query_system/src/dep_graph/debug.rs
@@ -1,6 +1,6 @@
//! Code for debugging the dep-graph.
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use std::error::Error;
@@ -28,7 +28,7 @@ impl DepNodeFilter {
}
/// Tests whether `node` meets the filter, returning true if so.
- pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool {
+ pub fn test(&self, node: &DepNode) -> bool {
let debug_str = format!("{node:?}");
self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
}
@@ -36,14 +36,14 @@ impl DepNodeFilter {
/// A filter like `F -> G` where `F` and `G` are valid dep-node
/// filters. This can be used to test the source/target independently.
-pub struct EdgeFilter<K: DepKind> {
+pub struct EdgeFilter {
pub source: DepNodeFilter,
pub target: DepNodeFilter,
- pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
+ pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode>>,
}
-impl<K: DepKind> EdgeFilter<K> {
- pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
+impl EdgeFilter {
+ pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
let parts: Vec<_> = test.split("->").collect();
if parts.len() != 2 {
Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into())
@@ -57,7 +57,7 @@ impl<K: DepKind> EdgeFilter<K> {
}
#[cfg(debug_assertions)]
- pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
+ pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
self.source.test(source) && self.target.test(target)
}
}
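
Editor's note: the debug helpers keep their `a&b -> c&d` filter syntax; only the generic `K: DepKind` parameter is removed. A hedged, simplified re-implementation (not the compiler's own types) showing how such a filter matches against a node's `Debug` output:

```rust
/// Simplified stand-in for `DepNodeFilter`: every `&`-separated fragment must
/// appear in the node's `Debug` output for the filter to match.
struct NodeFilter {
    text: String,
}

impl NodeFilter {
    fn new(text: &str) -> Self {
        Self { text: text.trim().to_string() }
    }

    fn test(&self, debug_str: &str) -> bool {
        self.text.split('&').map(|s| s.trim()).all(|frag| debug_str.contains(frag))
    }
}

/// Simplified `EdgeFilter::new`: a `source -> target` pair of node filters.
fn parse_edge_filter(test: &str) -> Result<(NodeFilter, NodeFilter), String> {
    let parts: Vec<_> = test.split("->").collect();
    if parts.len() != 2 {
        return Err(format!("expected a filter like `a&b -> c&d`, not `{test}`"));
    }
    Ok((NodeFilter::new(parts[0]), NodeFilter::new(parts[1])))
}

fn main() {
    let (source, target) = parse_edge_filter("typeck & foo -> layout_of").unwrap();
    assert!(source.test("DepNode { kind: typeck, hash: foo123 }"));
    assert!(target.test("DepNode { kind: layout_of, hash: bar456 }"));
    assert!(!target.test("DepNode { kind: mir_built, hash: baz789 }"));
    println!("edge filter behaves as documented");
}
```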
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 39a4cb1b1..17f96896a 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -42,36 +42,84 @@
//! `DefId` it was computed from. In other cases, too much information gets
//! lost during fingerprint computation.
-use super::{DepContext, DepKind, FingerprintStyle};
+use super::{DepContext, FingerprintStyle};
use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
+use rustc_data_structures::AtomicRef;
use rustc_hir::definitions::DefPathHash;
use std::fmt;
use std::hash::Hash;
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-pub struct DepNode<K> {
- pub kind: K,
+/// This serves as an index into arrays built by `make_dep_kind_array`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepKind {
+ variant: u16,
+}
+
+impl DepKind {
+ #[inline]
+ pub const fn new(variant: u16) -> Self {
+ Self { variant }
+ }
+
+ #[inline]
+ pub const fn as_inner(&self) -> u16 {
+ self.variant
+ }
+
+ #[inline]
+ pub const fn as_usize(&self) -> usize {
+ self.variant as usize
+ }
+}
+
+static_assert_size!(DepKind, 2);
+
+pub fn default_dep_kind_debug(kind: DepKind, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DepKind").field("variant", &kind.variant).finish()
+}
+
+pub static DEP_KIND_DEBUG: AtomicRef<fn(DepKind, &mut fmt::Formatter<'_>) -> fmt::Result> =
+ AtomicRef::new(&(default_dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (*DEP_KIND_DEBUG)(*self, f)
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepNode {
+ pub kind: DepKind,
pub hash: PackedFingerprint,
}
-impl<K: DepKind> DepNode<K> {
+// We keep a lot of `DepNode`s in memory during compilation. It's not
+// required that their size stay the same, but we don't want to change
+// it inadvertently. This assert just ensures we're aware of any change.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+static_assert_size!(DepNode, 18);
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+static_assert_size!(DepNode, 24);
+
+impl DepNode {
/// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
- pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K>
+ pub fn new_no_params<Tcx>(tcx: Tcx, kind: DepKind) -> DepNode
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
{
debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
DepNode { kind, hash: Fingerprint::ZERO.into() }
}
- pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K>
+ pub fn construct<Tcx, Key>(tcx: Tcx, kind: DepKind, arg: &Key) -> DepNode
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
Key: DepNodeParams<Tcx>,
{
let hash = arg.to_fingerprint(tcx);
@@ -93,18 +141,25 @@ impl<K: DepKind> DepNode<K> {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
- pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self
+ pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: DepKind) -> Self
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
{
debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
DepNode { kind, hash: def_path_hash.0.into() }
}
}
-impl<K: DepKind> fmt::Debug for DepNode<K> {
+pub fn default_dep_node_debug(node: DepNode, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DepNode").field("kind", &node.kind).field("hash", &node.hash).finish()
+}
+
+pub static DEP_NODE_DEBUG: AtomicRef<fn(DepNode, &mut fmt::Formatter<'_>) -> fmt::Result> =
+ AtomicRef::new(&(default_dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- K::debug_node(self, f)
+ (*DEP_NODE_DEBUG)(*self, f)
}
}
@@ -129,7 +184,7 @@ pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
/// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
/// It is always valid to return `None` here, in which case incremental
/// compilation will treat the query as having changed instead of forcing it.
- fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>;
+ fn recover(tcx: Tcx, dep_node: &DepNode) -> Option<Self>;
}
impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
@@ -156,7 +211,7 @@ where
}
#[inline(always)]
- default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> {
+ default fn recover(_: Tcx, _: &DepNode) -> Option<Self> {
None
}
}
@@ -216,10 +271,13 @@ pub struct DepKindStruct<Tcx: DepContext> {
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
- pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>,
+ pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode) -> bool>,
/// Invoke a query to put the on-disk cached value in memory.
- pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>,
+ pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode)>,
+
+ /// The name of this dep kind.
+ pub name: &'static &'static str,
}
/// A "work product" corresponds to a `.o` (or other) file that we
diff --git a/compiler/rustc_query_system/src/dep_graph/edges.rs b/compiler/rustc_query_system/src/dep_graph/edges.rs
new file mode 100644
index 000000000..6ba3924f6
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/edges.rs
@@ -0,0 +1,73 @@
+use crate::dep_graph::DepNodeIndex;
+use smallvec::SmallVec;
+use std::hash::{Hash, Hasher};
+use std::iter::Extend;
+use std::ops::Deref;
+
+#[derive(Default, Debug)]
+pub struct EdgesVec {
+ max: u32,
+ edges: SmallVec<[DepNodeIndex; EdgesVec::INLINE_CAPACITY]>,
+}
+
+impl Hash for EdgesVec {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ Hash::hash(&self.edges, hasher)
+ }
+}
+
+impl EdgesVec {
+ pub const INLINE_CAPACITY: usize = 8;
+
+ #[inline]
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ #[inline]
+ pub fn push(&mut self, edge: DepNodeIndex) {
+ self.max = self.max.max(edge.as_u32());
+ self.edges.push(edge);
+ }
+
+ #[inline]
+ pub fn max_index(&self) -> u32 {
+ self.max
+ }
+}
+
+impl Deref for EdgesVec {
+ type Target = [DepNodeIndex];
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.edges.as_slice()
+ }
+}
+
+impl FromIterator<DepNodeIndex> for EdgesVec {
+ #[inline]
+ fn from_iter<T>(iter: T) -> Self
+ where
+ T: IntoIterator<Item = DepNodeIndex>,
+ {
+ let mut vec = EdgesVec::new();
+ for index in iter {
+ vec.push(index)
+ }
+ vec
+ }
+}
+
+impl Extend<DepNodeIndex> for EdgesVec {
+ #[inline]
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = DepNodeIndex>,
+ {
+ for elem in iter {
+ self.push(elem);
+ }
+ }
+}
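
Editor's note: the new `EdgesVec` is a thin wrapper over a `SmallVec` (inline capacity 8) that also tracks the largest index pushed; the serializer later uses `max_index()` to pick the narrowest per-edge byte width. A hedged, dependency-free sketch of the same idea with plain `u32` indices (`bytes_per_index` is invented here to show why the running max is useful):

```rust
/// Simplified `EdgesVec`: stores edge indices and remembers the running
/// maximum so an encoder can later decide how many bytes per index it needs.
#[derive(Default, Debug)]
struct Edges {
    max: u32,
    edges: Vec<u32>, // the real type uses a SmallVec with inline capacity 8
}

impl Edges {
    fn push(&mut self, edge: u32) {
        self.max = self.max.max(edge);
        self.edges.push(edge);
    }

    fn max_index(&self) -> u32 {
        self.max
    }

    /// Bytes needed per index to represent everything up to `max_index()` (1..=4).
    fn bytes_per_index(&self) -> usize {
        let bits = 32 - self.max.leading_zeros() as usize;
        ((bits + 7) / 8).max(1)
    }
}

fn main() {
    let mut edges = Edges::default();
    for e in [3, 70_000, 12] {
        edges.push(e);
    }
    assert_eq!(edges.max_index(), 70_000);
    // 70_000 needs 17 bits, so 3 bytes per edge index are enough for this node.
    assert_eq!(edges.bytes_per_index(), 3);
    println!("{} edges, {} bytes per index", edges.edges.len(), edges.bytes_per_index());
}
```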
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 30422ea11..c7e92d7b2 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -8,7 +8,6 @@ use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unord::UnordMap;
use rustc_index::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
-use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
@@ -18,7 +17,8 @@ use std::sync::atomic::Ordering::Relaxed;
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
-use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
+use crate::dep_graph::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};
@@ -26,8 +26,8 @@ use crate::query::{QueryContext, QuerySideEffects};
use {super::debug::EdgeFilter, std::env};
#[derive(Clone)]
-pub struct DepGraph<K: DepKind> {
- data: Option<Lrc<DepGraphData<K>>>,
+pub struct DepGraph<D: Deps> {
+ data: Option<Lrc<DepGraphData<D>>>,
/// This field is used for assigning DepNodeIndices when running in
/// non-incremental mode. Even in non-incremental mode we make sure that
@@ -74,16 +74,16 @@ impl DepNodeColor {
}
}
-pub struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<D: Deps> {
/// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
/// current one anymore, but we do reference shared data to save space.
- current: CurrentDepGraph<K>,
+ current: CurrentDepGraph<D>,
/// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them.
- previous: SerializedDepGraph<K>,
+ previous: SerializedDepGraph,
colors: DepNodeColorMap,
@@ -95,12 +95,12 @@ pub struct DepGraphData<K: DepKind> {
/// this map. We can later look for and extract that data.
previous_work_products: WorkProductMap,
- dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
+ dep_node_debug: Lock<FxHashMap<DepNode, String>>,
/// Used by incremental compilation tests to assert that
/// a particular query result was decoded from disk
/// (not just marked green)
- debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
+ debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
@@ -112,15 +112,15 @@ where
stable_hasher.finish()
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
pub fn new(
profiler: &SelfProfilerRef,
- prev_graph: SerializedDepGraph<K>,
+ prev_graph: SerializedDepGraph,
prev_work_products: WorkProductMap,
encoder: FileEncoder,
record_graph: bool,
record_stats: bool,
- ) -> DepGraph<K> {
+ ) -> DepGraph<D> {
let prev_graph_node_count = prev_graph.node_count();
let current = CurrentDepGraph::new(
@@ -136,8 +136,8 @@ impl<K: DepKind> DepGraph<K> {
        // Instantiate a dependency-less node only once for anonymous queries.
let _green_node_index = current.intern_new_node(
profiler,
- DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
- smallvec![],
+ DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
+ EdgesVec::new(),
Fingerprint::ZERO,
);
assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
@@ -146,8 +146,8 @@ impl<K: DepKind> DepGraph<K> {
let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
profiler,
&prev_graph,
- DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
- smallvec![],
+ DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
+ EdgesVec::new(),
None,
false,
);
@@ -181,12 +181,12 @@ impl<K: DepKind> DepGraph<K> {
}
}
- pub fn new_disabled() -> DepGraph<K> {
+ pub fn new_disabled() -> DepGraph<D> {
DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
}
#[inline]
- pub fn data(&self) -> Option<&DepGraphData<K>> {
+ pub fn data(&self) -> Option<&DepGraphData<D>> {
self.data.as_deref()
}
@@ -196,7 +196,7 @@ impl<K: DepKind> DepGraph<K> {
self.data.is_some()
}
- pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(data) = &self.data {
data.current.encoder.borrow().with_query(f)
}
@@ -204,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
pub fn assert_ignored(&self) {
if let Some(..) = self.data {
- K::read_deps(|task_deps| {
+ D::read_deps(|task_deps| {
assert_matches!(
task_deps,
TaskDepsRef::Ignore,
@@ -218,7 +218,7 @@ impl<K: DepKind> DepGraph<K> {
where
OP: FnOnce() -> R,
{
- K::with_deps(TaskDepsRef::Ignore, op)
+ D::with_deps(TaskDepsRef::Ignore, op)
}
/// Used to wrap the deserialization of a query result from disk,
@@ -271,13 +271,13 @@ impl<K: DepKind> DepGraph<K> {
where
OP: FnOnce() -> R,
{
- K::with_deps(TaskDepsRef::Forbid, op)
+ D::with_deps(TaskDepsRef::Forbid, op)
}
#[inline(always)]
- pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self,
- key: DepNode<K>,
+ key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
@@ -289,10 +289,10 @@ impl<K: DepKind> DepGraph<K> {
}
}
- pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+ pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
- dep_kind: K,
+ dep_kind: DepKind,
op: OP,
) -> (R, DepNodeIndex)
where
@@ -305,7 +305,7 @@ impl<K: DepKind> DepGraph<K> {
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
/// Starts a new dep-graph task. Dep-graph tasks are specified
/// using a free function (`task`) and **not** a closure -- this
/// is intentional because we want to exercise tight control over
@@ -334,9 +334,9 @@ impl<K: DepKind> DepGraphData<K> {
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
#[inline(always)]
- pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self,
- key: DepNode<K>,
+ key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
@@ -354,14 +354,14 @@ impl<K: DepKind> DepGraphData<K> {
- dep-node: {key:?}"
);
- let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
+ let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
- (with_deps(TaskDepsRef::EvalAlways), smallvec![])
+ (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
} else {
let task_deps = Lock::new(TaskDeps {
#[cfg(debug_assertions)]
node: Some(key),
- reads: SmallVec::new(),
+ reads: EdgesVec::new(),
read_set: Default::default(),
phantom_data: PhantomData,
});
@@ -402,10 +402,10 @@ impl<K: DepKind> DepGraphData<K> {
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
- pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+ pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
- dep_kind: K,
+ dep_kind: DepKind,
op: OP,
) -> (R, DepNodeIndex)
where
@@ -414,7 +414,7 @@ impl<K: DepKind> DepGraphData<K> {
debug_assert!(!cx.is_eval_always(dep_kind));
let task_deps = Lock::new(TaskDeps::default());
- let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+ let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner();
let task_deps = task_deps.reads;
@@ -461,11 +461,11 @@ impl<K: DepKind> DepGraphData<K> {
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
#[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data {
- K::read_deps(|task_deps| {
+ D::read_deps(|task_deps| {
let mut task_deps = match task_deps {
TaskDepsRef::Allow(deps) => deps.lock(),
TaskDepsRef::EvalAlways => {
@@ -486,14 +486,14 @@ impl<K: DepKind> DepGraph<K> {
// As long as we only have a low number of reads we can avoid doing a hash
// insert and potentially allocating/reallocating the hashmap
- let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+ let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
task_deps.reads.iter().all(|other| *other != dep_node_index)
} else {
task_deps.read_set.insert(dep_node_index)
};
if new_read {
task_deps.reads.push(dep_node_index);
- if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+ if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
// Fill `read_set` with what we have so far so we can use the hashset
// next time
task_deps.read_set.extend(task_deps.reads.iter().copied());
@@ -532,9 +532,9 @@ impl<K: DepKind> DepGraph<K> {
/// FIXME: If the code is changed enough for this node to be marked before requiring the
/// caller's node, we suppose that those changes will be enough to mark this node red and
/// force a recomputation using the "normal" way.
- pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
+ pub fn with_feed_task<Ctxt: DepContext<Deps = D>, A: Debug, R: Debug>(
&self,
- node: DepNode<K>,
+ node: DepNode,
cx: Ctxt,
key: A,
result: &R,
@@ -572,8 +572,8 @@ impl<K: DepKind> DepGraph<K> {
}
}
- let mut edges = SmallVec::new();
- K::read_deps(|task_deps| match task_deps {
+ let mut edges = EdgesVec::new();
+ D::read_deps(|task_deps| match task_deps {
TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
TaskDepsRef::EvalAlways => {
edges.push(DepNodeIndex::FOREVER_RED_NODE);
@@ -623,27 +623,22 @@ impl<K: DepKind> DepGraph<K> {
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
#[inline]
- pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+ pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.current.prev_index_to_index.lock()[prev_index]
} else {
- self.current
- .new_node_to_index
- .get_shard_by_value(dep_node)
- .lock()
- .get(dep_node)
- .copied()
+ self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
}
}
#[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.dep_node_index_of_opt(dep_node).is_some()
}
- fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.colors.get(prev_index)
} else {
@@ -665,18 +660,18 @@ impl<K: DepKind> DepGraphData<K> {
}
#[inline]
- pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> {
+ pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
self.previous.index_to_node(prev_index)
}
- pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+ pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
self.debug_loaded_from_disk.lock().insert(dep_node);
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
#[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
}
@@ -692,12 +687,12 @@ impl<K: DepKind> DepGraph<K> {
&self.data.as_ref().unwrap().previous_work_products
}
- pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
+ pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
}
#[inline(always)]
- pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
+ pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
where
F: FnOnce() -> String,
{
@@ -710,11 +705,11 @@ impl<K: DepKind> DepGraph<K> {
dep_node_debug.borrow_mut().insert(dep_node, debug_str);
}
- pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
+ pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
}
- fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(ref data) = self.data {
return data.node_color(dep_node);
}
@@ -722,25 +717,25 @@ impl<K: DepKind> DepGraph<K> {
None
}
- pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+ pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
/// Try to mark a node index for the node dep_node.
///
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
- pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+ pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -762,11 +757,11 @@ impl<K: DepKind> DepGraphData<K> {
}
#[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
- fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
+ fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
parent_dep_node_index: SerializedDepNodeIndex,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>,
) -> Option<()> {
let dep_dep_node_color = self.colors.get(parent_dep_node_index);
@@ -850,11 +845,11 @@ impl<K: DepKind> DepGraphData<K> {
/// Try to mark a dep-node which existed in the previous compilation session as green.
#[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
- fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
+ fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
prev_dep_node_index: SerializedDepNodeIndex,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>,
) -> Option<DepNodeIndex> {
let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
@@ -872,7 +867,7 @@ impl<K: DepKind> DepGraphData<K> {
let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
- for &dep_dep_node_index in prev_deps {
+ for dep_dep_node_index in prev_deps {
self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node, Some(&frame))?;
}
@@ -921,7 +916,7 @@ impl<K: DepKind> DepGraphData<K> {
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
- fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
+ fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
dep_node_index: DepNodeIndex,
@@ -945,16 +940,16 @@ impl<K: DepKind> DepGraphData<K> {
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
/// Returns true if the given node has been marked as red during the
/// current compilation session. Used in various assertions
- pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn is_red(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node) == Some(DepNodeColor::Red)
}
/// Returns true if the given node has been marked as green during the
/// current compilation session. Used in various assertions
- pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn is_green(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node).is_some_and(|c| c.is_green())
}
@@ -966,7 +961,7 @@ impl<K: DepKind> DepGraph<K> {
///
/// This method will only load queries that will end up in the disk cache.
/// Other queries will not be executed.
- pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
+ pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap();
@@ -1081,9 +1076,9 @@ rustc_index::newtype_index! {
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
-pub(super) struct CurrentDepGraph<K: DepKind> {
- encoder: Steal<GraphEncoder<K>>,
- new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
+pub(super) struct CurrentDepGraph<D: Deps> {
+ encoder: Steal<GraphEncoder<D>>,
+ new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
/// This is used to verify that fingerprints do not change between the creation of a node
@@ -1094,7 +1089,7 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
/// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`.
#[cfg(debug_assertions)]
- forbidden_edge: Option<EdgeFilter<K>>,
+ forbidden_edge: Option<EdgeFilter>,
/// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
/// their edges. This has the beneficial side-effect that multiple anonymous
@@ -1121,14 +1116,14 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
node_intern_event_id: Option<EventId>,
}
-impl<K: DepKind> CurrentDepGraph<K> {
+impl<D: Deps> CurrentDepGraph<D> {
fn new(
profiler: &SelfProfilerRef,
prev_graph_node_count: usize,
encoder: FileEncoder,
record_graph: bool,
record_stats: bool,
- ) -> CurrentDepGraph<K> {
+ ) -> Self {
use std::time::{SystemTime, UNIX_EPOCH};
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
@@ -1166,7 +1161,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
)),
new_node_to_index: Sharded::new(|| {
FxHashMap::with_capacity_and_hasher(
- new_node_count_estimate / sharded::SHARDS,
+ new_node_count_estimate / sharded::shards(),
Default::default(),
)
}),
@@ -1183,7 +1178,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
}
#[cfg(debug_assertions)]
- fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
+ fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
if let Some(forbidden_edge) = &self.forbidden_edge {
forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
}
@@ -1197,12 +1192,11 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_new_node(
&self,
profiler: &SelfProfilerRef,
- key: DepNode<K>,
+ key: DepNode,
edges: EdgesVec,
current_fingerprint: Fingerprint,
) -> DepNodeIndex {
- let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
- {
+ let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index =
@@ -1221,8 +1215,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_node(
&self,
profiler: &SelfProfilerRef,
- prev_graph: &SerializedDepGraph<K>,
- key: DepNode<K>,
+ prev_graph: &SerializedDepGraph,
+ key: DepNode,
edges: EdgesVec,
fingerprint: Option<Fingerprint>,
print_status: bool,
@@ -1295,7 +1289,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn promote_node_and_deps_to_current(
&self,
profiler: &SelfProfilerRef,
- prev_graph: &SerializedDepGraph<K>,
+ prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex,
) -> DepNodeIndex {
self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
@@ -1308,8 +1302,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
let key = prev_graph.index_to_node(prev_index);
let edges = prev_graph
.edge_targets_from(prev_index)
- .iter()
- .map(|i| prev_index_to_index[*i].unwrap())
+ .map(|i| prev_index_to_index[i].unwrap())
.collect();
let fingerprint = prev_graph.fingerprint_by_index(prev_index);
let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
@@ -1324,27 +1317,23 @@ impl<K: DepKind> CurrentDepGraph<K> {
#[inline]
fn debug_assert_not_in_new_nodes(
&self,
- prev_graph: &SerializedDepGraph<K>,
+ prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex,
) {
let node = &prev_graph.index_to_node(prev_index);
debug_assert!(
- !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
+ !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
"node from previous graph present in new node collection"
);
}
}
-/// The capacity of the `reads` field `SmallVec`
-const TASK_DEPS_READS_CAP: usize = 8;
-type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
-
#[derive(Debug, Clone, Copy)]
-pub enum TaskDepsRef<'a, K: DepKind> {
+pub enum TaskDepsRef<'a> {
/// New dependencies can be added to the
/// `TaskDeps`. This is used when executing a 'normal' query
/// (no `eval_always` modifier)
- Allow(&'a Lock<TaskDeps<K>>),
+ Allow(&'a Lock<TaskDeps>),
/// This is used when executing an `eval_always` query. We don't
/// need to track dependencies for a query that's always
/// re-executed -- but we need to know that this is an `eval_always`
@@ -1361,15 +1350,15 @@ pub enum TaskDepsRef<'a, K: DepKind> {
}
#[derive(Debug)]
-pub struct TaskDeps<K: DepKind> {
+pub struct TaskDeps {
#[cfg(debug_assertions)]
- node: Option<DepNode<K>>,
+ node: Option<DepNode>,
reads: EdgesVec,
read_set: FxHashSet<DepNodeIndex>,
- phantom_data: PhantomData<DepNode<K>>,
+ phantom_data: PhantomData<DepNode>,
}
-impl<K: DepKind> Default for TaskDeps<K> {
+impl Default for TaskDeps {
fn default() -> Self {
Self {
#[cfg(debug_assertions)]
@@ -1421,10 +1410,7 @@ impl DepNodeColorMap {
#[inline(never)]
#[cold]
-pub(crate) fn print_markframe_trace<K: DepKind>(
- graph: &DepGraph<K>,
- frame: Option<&MarkFrame<'_>>,
-) {
+pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
let data = graph.data.as_ref().unwrap();
eprintln!("there was a panic while trying to force a dep node");
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 0fd9e35d6..624ae680a 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -1,10 +1,12 @@
pub mod debug;
-mod dep_node;
+pub mod dep_node;
+mod edges;
mod graph;
mod query;
mod serialized;
-pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
+pub use dep_node::{DepKind, DepKindStruct, DepNode, DepNodeParams, WorkProductId};
+pub use edges::EdgesVec;
pub use graph::{
hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
WorkProduct, WorkProductMap,
@@ -14,22 +16,20 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use crate::ich::StableHashingContext;
use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_serialize::{opaque::FileEncoder, Encodable};
use rustc_session::Session;
-use std::hash::Hash;
-use std::{fmt, panic};
+use std::panic;
use self::graph::{print_markframe_trace, MarkFrame};
pub trait DepContext: Copy {
- type DepKind: self::DepKind;
+ type Deps: Deps;
/// Create a hashing context for hashing new results.
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
/// Access the DepGraph.
- fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
+ fn dep_graph(&self) -> &DepGraph<Self::Deps>;
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
@@ -37,10 +37,10 @@ pub trait DepContext: Copy {
/// Access the compiler session.
fn sess(&self) -> &Session;
- fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>;
+ fn dep_kind_info(&self, dep_node: DepKind) -> &DepKindStruct<Self>;
#[inline(always)]
- fn fingerprint_style(self, kind: Self::DepKind) -> FingerprintStyle {
+ fn fingerprint_style(self, kind: DepKind) -> FingerprintStyle {
let data = self.dep_kind_info(kind);
if data.is_anon {
return FingerprintStyle::Opaque;
@@ -50,18 +50,14 @@ pub trait DepContext: Copy {
#[inline(always)]
/// Return whether this kind always require evaluation.
- fn is_eval_always(self, kind: Self::DepKind) -> bool {
+ fn is_eval_always(self, kind: DepKind) -> bool {
self.dep_kind_info(kind).is_eval_always
}
/// Try to force a dep node to execute and see if it's green.
#[inline]
#[instrument(skip(self, frame), level = "debug")]
- fn try_force_from_dep_node(
- self,
- dep_node: DepNode<Self::DepKind>,
- frame: Option<&MarkFrame<'_>>,
- ) -> bool {
+ fn try_force_from_dep_node(self, dep_node: DepNode, frame: Option<&MarkFrame<'_>>) -> bool {
let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.force_from_dep_node {
if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
@@ -79,7 +75,7 @@ pub trait DepContext: Copy {
}
/// Load data from the on-disk cache.
- fn try_load_from_on_disk_cache(self, dep_node: DepNode<Self::DepKind>) {
+ fn try_load_from_on_disk_cache(self, dep_node: DepNode) {
let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.try_load_from_on_disk_cache {
f(self, dep_node)
@@ -87,15 +83,37 @@ pub trait DepContext: Copy {
}
}
+pub trait Deps {
+ /// Execute the operation with provided dependencies.
+ fn with_deps<OP, R>(deps: TaskDepsRef<'_>, op: OP) -> R
+ where
+ OP: FnOnce() -> R;
+
+ /// Access dependencies from current implicit context.
+ fn read_deps<OP>(op: OP)
+ where
+ OP: for<'a> FnOnce(TaskDepsRef<'a>);
+
+ /// We use this for most things when incr. comp. is turned off.
+ const DEP_KIND_NULL: DepKind;
+
+ /// We use this to create a forever-red node.
+ const DEP_KIND_RED: DepKind;
+
+ /// This is the highest value a `DepKind` can have. It's used during encoding to
+ /// pack information into the unused bits.
+ const DEP_KIND_MAX: u16;
+}
+
pub trait HasDepContext: Copy {
- type DepKind: self::DepKind;
- type DepContext: self::DepContext<DepKind = Self::DepKind>;
+ type Deps: self::Deps;
+ type DepContext: self::DepContext<Deps = Self::Deps>;
fn dep_context(&self) -> &Self::DepContext;
}
impl<T: DepContext> HasDepContext for T {
- type DepKind = T::DepKind;
+ type Deps = T::Deps;
type DepContext = Self;
fn dep_context(&self) -> &Self::DepContext {
@@ -104,7 +122,7 @@ impl<T: DepContext> HasDepContext for T {
}
impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) {
- type DepKind = T::DepKind;
+ type Deps = T::Deps;
type DepContext = T::DepContext;
fn dep_context(&self) -> &Self::DepContext {
@@ -136,25 +154,3 @@ impl FingerprintStyle {
}
}
}
-
-/// Describe the different families of dependency nodes.
-pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
- /// DepKind to use when incr. comp. is turned off.
- const NULL: Self;
-
- /// DepKind to use to create the initial forever-red node.
- const RED: Self;
-
- /// Implementation of `std::fmt::Debug` for `DepNode`.
- fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
-
- /// Execute the operation with provided dependencies.
- fn with_deps<OP, R>(deps: TaskDepsRef<'_, Self>, op: OP) -> R
- where
- OP: FnOnce() -> R;
-
- /// Access dependencies from current implicit context.
- fn read_deps<OP>(op: OP)
- where
- OP: for<'a> FnOnce(TaskDepsRef<'a, Self>);
-}
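
Editor's note: the removed `DepKind` trait's dependency-tracking hooks move to the new `Deps` trait (`with_deps`, `read_deps`, plus the `DEP_KIND_NULL`/`DEP_KIND_RED`/`DEP_KIND_MAX` constants). A hedged sketch of the role those two hooks play, using an owning thread-local instead of the compiler's implicit-context machinery (names and shapes are illustrative only):

```rust
use std::cell::RefCell;

/// Simplified dependency-tracking state: either collect read indices, or ignore them.
enum TaskDeps {
    Allow(Vec<u32>),
    Ignore,
}

thread_local! {
    static CURRENT: RefCell<TaskDeps> = RefCell::new(TaskDeps::Ignore);
}

/// Rough analogue of `Deps::with_deps`: run `op` with a fresh read-collecting
/// context and return its result together with the recorded reads.
fn with_deps<R>(op: impl FnOnce() -> R) -> (R, Vec<u32>) {
    CURRENT.with(|c| *c.borrow_mut() = TaskDeps::Allow(Vec::new()));
    let result = op();
    let reads = CURRENT.with(|c| {
        match std::mem::replace(&mut *c.borrow_mut(), TaskDeps::Ignore) {
            TaskDeps::Allow(reads) => reads,
            TaskDeps::Ignore => Vec::new(),
        }
    });
    (result, reads)
}

/// Rough analogue of what `DepGraph::read_index` does through `Deps::read_deps`:
/// record the read if the current context is tracking, ignore it otherwise.
fn read_index(index: u32) {
    CURRENT.with(|c| {
        if let TaskDeps::Allow(reads) = &mut *c.borrow_mut() {
            reads.push(index);
        }
    });
}

fn main() {
    // Outside any task, reads are ignored (the `Ignore` state).
    read_index(7);

    // Inside a task, every read becomes an edge of that task's dep node.
    let (value, reads) = with_deps(|| {
        read_index(1);
        read_index(2);
        40 + 2
    });
    assert_eq!(value, 42);
    assert_eq!(reads, vec![1, 2]);
    println!("task result {value}, recorded deps {reads:?}");
}
```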
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index 5cbc6bf8f..5969e5fbe 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -2,16 +2,16 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
use rustc_index::IndexVec;
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
-pub struct DepGraphQuery<K> {
- pub graph: Graph<DepNode<K>, ()>,
- pub indices: FxHashMap<DepNode<K>, NodeIndex>,
+pub struct DepGraphQuery {
+ pub graph: Graph<DepNode, ()>,
+ pub indices: FxHashMap<DepNode, NodeIndex>,
pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
}
-impl<K: DepKind> DepGraphQuery<K> {
- pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
+impl DepGraphQuery {
+ pub fn new(prev_node_count: usize) -> DepGraphQuery {
let node_count = prev_node_count + prev_node_count / 4;
let edge_count = 6 * node_count;
@@ -22,7 +22,7 @@ impl<K: DepKind> DepGraphQuery<K> {
DepGraphQuery { graph, indices, dep_index_to_index }
}
- pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
+ pub fn push(&mut self, index: DepNodeIndex, node: DepNode, edges: &[DepNodeIndex]) {
let source = self.graph.add_node(node);
self.dep_index_to_index.insert(index, source);
self.indices.insert(node, source);
@@ -37,11 +37,11 @@ impl<K: DepKind> DepGraphQuery<K> {
}
}
- pub fn nodes(&self) -> Vec<&DepNode<K>> {
+ pub fn nodes(&self) -> Vec<&DepNode> {
self.graph.all_nodes().iter().map(|n| &n.data).collect()
}
- pub fn edges(&self) -> Vec<(&DepNode<K>, &DepNode<K>)> {
+ pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
self.graph
.all_edges()
.iter()
@@ -50,7 +50,7 @@ impl<K: DepKind> DepGraphQuery<K> {
.collect()
}
- fn reachable_nodes(&self, node: &DepNode<K>, direction: Direction) -> Vec<&DepNode<K>> {
+ fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
if let Some(&index) = self.indices.get(node) {
self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
} else {
@@ -59,7 +59,7 @@ impl<K: DepKind> DepGraphQuery<K> {
}
/// All nodes that can reach `node`.
- pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
+ pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
self.reachable_nodes(node, INCOMING)
}
}
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index edddfda62..fcf46be6e 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -1,6 +1,6 @@
//! The data that we will serialize and deserialize.
//!
-//! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies
+//! Notionally, the dep-graph is a sequence of NodeInfo with the dependencies
//! specified inline. The total number of nodes and edges are stored as the last
//! 16 bytes of the file, so we can find them easily at decoding time.
//!
@@ -11,17 +11,44 @@
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge count are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
+//!
+//! The encoding of the dep-graph is generally designed around the fact that fixed-size
+//! reads of encoded data are generally faster than variable-sized reads. Ergo we adopt
+//! essentially the same varint encoding scheme used in the rmeta format; the edge lists
+//! for each node on the graph store a 2-bit integer which is the number of bytes per edge
+//! index in that node's edge list. We effectively ignore that an edge index of 0 could be
+//! encoded with 0 bytes in order to not require 3 bits to store the byte width of the edges.
+//! The overhead of calculating the correct byte width for each edge is mitigated by
+//! building edge lists with [`EdgesVec`] which keeps a running max of the edge indices in a node.
+//!
+//! When we decode this data, we do not immediately create [`SerializedDepNodeIndex`] and
+//! instead keep the data in its denser serialized form which lets us turn our on-disk size
+//! efficiency directly into a peak memory reduction. When we convert these encoded-in-memory
+//! values into their fully-deserialized type, we use a fixed-size read of the encoded array
+//! then mask off any errant bytes we read. The array of edge index bytes is padded to permit this.
+//!
+//! We also encode and decode the entire rest of each node using [`SerializedNodeHeader`]
+//! to let this encoding and decoding be done in one fixed-size operation. These headers contain
+//! two [`Fingerprint`]s along with the serialized [`DepKind`], and the number of edge indices
+//! in the node and the number of bytes used to encode the edge indices for this node. The
+//! [`DepKind`], number of edges, and bytes per edge are all bit-packed together, if they fit.
+//! If the number of edges in this node does not fit in the bits available in the header, we
+//! store it directly after the header with leb128.
use super::query::DepGraphQuery;
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepKind, DepNode, DepNodeIndex, Deps};
+use crate::dep_graph::EdgesVec;
use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fingerprint::PackedFingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
+use rustc_data_structures::unhash::UnhashMap;
use rustc_index::{Idx, IndexVec};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
-use rustc_serialize::{Decodable, Decoder, Encodable};
-use smallvec::SmallVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::iter;
+use std::marker::PhantomData;
// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
@@ -31,26 +58,37 @@ rustc_index::newtype_index! {
pub struct SerializedDepNodeIndex {}
}
+const DEP_NODE_SIZE: usize = std::mem::size_of::<SerializedDepNodeIndex>();
+/// Amount of padding we need to add to the edge list data so that we can retrieve every
+/// SerializedDepNodeIndex with a fixed-size read then mask.
+const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
+/// Number of bits we need to store the number of used bytes in a SerializedDepNodeIndex.
+/// Note that wherever we encode byte widths like this we actually store the number of bytes used
+/// minus 1; for a 4-byte value we technically would have 5 widths to store, but using one byte to
+/// store zeroes (which are relatively rare) is a decent tradeoff to save a bit in our bitfields.
+const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
+
/// Data for use when recompiling the **current crate**.
#[derive(Debug)]
-pub struct SerializedDepGraph<K: DepKind> {
+pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph
- nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>,
+ nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
/// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
/// the DepNode at the same index in the nodes vector.
fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
/// For each DepNode, stores the list of edges originating from that
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
/// which holds the actual DepNodeIndices of the target nodes.
- edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
- /// A flattened list of all edge targets in the graph. Edge sources are
- /// implicit in edge_list_indices.
- edge_list_data: Vec<SerializedDepNodeIndex>,
- /// Reciprocal map to `nodes`.
- index: FxHashMap<DepNode<K>, SerializedDepNodeIndex>,
+ edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
+ /// A flattened list of all edge targets in the graph, stored in the same
+ /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
+ edge_list_data: Vec<u8>,
+ /// Stores a map from fingerprints to nodes per dep node kind.
+ /// This is the reciprocal of `nodes`.
+ index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
}
-impl<K: DepKind> Default for SerializedDepGraph<K> {
+impl Default for SerializedDepGraph {
fn default() -> Self {
SerializedDepGraph {
nodes: Default::default(),
@@ -62,21 +100,47 @@ impl<K: DepKind> Default for SerializedDepGraph<K> {
}
}
-impl<K: DepKind> SerializedDepGraph<K> {
+impl SerializedDepGraph {
#[inline]
- pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
- let targets = self.edge_list_indices[source];
- &self.edge_list_data[targets.0 as usize..targets.1 as usize]
+ pub fn edge_targets_from(
+ &self,
+ source: SerializedDepNodeIndex,
+ ) -> impl Iterator<Item = SerializedDepNodeIndex> + '_ {
+ let header = self.edge_list_indices[source];
+ let mut raw = &self.edge_list_data[header.start()..];
+ // Figure out where the edge list for `source` ends by getting the start index of the next
+ // edge list, or the end of the array if this is the last edge list.
+ let end = self
+ .edge_list_indices
+ .get(source + 1)
+ .map(|h| h.start())
+ .unwrap_or_else(|| self.edge_list_data.len() - DEP_NODE_PAD);
+
+ // The number of edges for this node is implicitly stored in the combination of the byte
+ // width and the length.
+ let bytes_per_index = header.bytes_per_index();
+ let len = (end - header.start()) / bytes_per_index;
+
+ // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
+ let mask = header.mask();
+ (0..len).map(move |_| {
+ // Doing this slicing in this order ensures that the first bounds check suffices for
+ // all the others.
+ let index = &raw[..DEP_NODE_SIZE];
+ raw = &raw[bytes_per_index..];
+ let index = u32::from_le_bytes(index.try_into().unwrap()) & mask;
+ SerializedDepNodeIndex::from_u32(index)
+ })
}
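The read-then-mask loop above can be pictured with plain `Vec<u8>` and `u32` values; a minimal sketch of the same idea (the widths, values, and padding here are made up for illustration):

    fn main() {
        // Three indices packed at a width of 2 bytes each, little-endian.
        let indices = [0x0102u32, 0x0a0b, 0x1122];
        let bytes_per_index = 2;

        let mut data = Vec::new();
        for &ix in &indices {
            data.extend_from_slice(&ix.to_le_bytes()[..bytes_per_index]);
        }
        // Pad so the last fixed-size (4-byte) read does not run off the end.
        data.extend_from_slice(&[0u8; 3]);

        let mask = u32::MAX >> (32 - 8 * bytes_per_index);
        let mut decoded = Vec::new();
        let mut offset = 0;
        for _ in 0..indices.len() {
            // Always read 4 bytes, then mask off whatever belongs to the next index.
            let word = u32::from_le_bytes(data[offset..offset + 4].try_into().unwrap());
            decoded.push(word & mask);
            offset += bytes_per_index;
        }

        assert_eq!(decoded, indices);
        println!("decoded: {decoded:?}");
    }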
#[inline]
- pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
+ pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
self.nodes[dep_node_index]
}
#[inline]
- pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> {
- self.index.get(dep_node).cloned()
+ pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
+ self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
}
#[inline]
@@ -84,16 +148,45 @@ impl<K: DepKind> SerializedDepGraph<K> {
self.fingerprints[dep_node_index]
}
+ #[inline]
pub fn node_count(&self) -> usize {
- self.index.len()
+ self.nodes.len()
+ }
+}
+
+/// A packed representation of an edge's start index and byte width.
+///
+/// This is packed by stealing 2 bits from the start index, which means we can only accommodate
+/// edge data arrays up to a quarter of our address space, which seems fine.
+#[derive(Debug, Clone, Copy)]
+struct EdgeHeader {
+ repr: usize,
+}
+
+impl EdgeHeader {
+ #[inline]
+ fn start(self) -> usize {
+ self.repr >> DEP_NODE_WIDTH_BITS
+ }
+
+ #[inline]
+ fn bytes_per_index(self) -> usize {
+ (self.repr & mask(DEP_NODE_WIDTH_BITS)) + 1
}
+
+ #[inline]
+ fn mask(self) -> u32 {
+ mask(self.bytes_per_index() * 8) as u32
+ }
+}
+
+fn mask(bits: usize) -> usize {
+ usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
}
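A hypothetical round trip through this packing, with plain integers standing in for `EdgeHeader`: the byte offset is shifted up by two bits and the width minus one occupies the freed bits, while `mask` is just an all-ones value shifted right.

    fn main() {
        const WIDTH_BITS: usize = 2;

        fn mask(bits: usize) -> usize {
            usize::MAX >> (usize::BITS as usize - bits)
        }

        // Pack: a start offset of 1_000 bytes and a 3-byte-per-index edge list.
        let start = 1_000usize;
        let bytes_per_index = 3usize;
        let repr = (start << WIDTH_BITS) | (bytes_per_index - 1);

        // Unpack, the same way EdgeHeader::start/bytes_per_index do.
        assert_eq!(repr >> WIDTH_BITS, start);
        assert_eq!((repr & mask(WIDTH_BITS)) + 1, bytes_per_index);
        // The per-node read mask keeps only the bottom `bytes_per_index` bytes.
        assert_eq!(mask(bytes_per_index * 8) as u32, 0x00ff_ffff);
        println!("repr = {repr:#x}");
    }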
-impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
- for SerializedDepGraph<K>
-{
+impl SerializedDepGraph {
#[instrument(level = "debug", skip(d))]
- fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
+ pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
let (node_count, edge_count) =
@@ -107,76 +200,261 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
debug!(?node_count, ?edge_count);
+ let graph_bytes = d.len() - (2 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();
+
let mut nodes = IndexVec::with_capacity(node_count);
let mut fingerprints = IndexVec::with_capacity(node_count);
let mut edge_list_indices = IndexVec::with_capacity(node_count);
- let mut edge_list_data = Vec::with_capacity(edge_count);
+ // This estimation assumes that all of the encoded bytes are for the edge lists or for the
+ // fixed-size node headers. But that's not necessarily true; if any edge list has a length
+ // that spills out of the size we can bit-pack into SerializedNodeHeader then some of the
+ // total serialized size is also used by leb128-encoded edge list lengths. Neglecting that
+ // contribution to graph_bytes means our estimation of the bytes needed for edge_list_data
+ // slightly overshoots. But it cannot overshoot by much; consider that the worst case is
+ // a node with length 64, which means the spilled 1-byte leb128 length is 1 byte out of at
+ // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
+ // length is about the same fractional overhead and it amortizes for yet greater lengths.
+ let mut edge_list_data = Vec::with_capacity(
+ graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
+ );
for _index in 0..node_count {
- let dep_node: DepNode<K> = Decodable::decode(d);
- let _i: SerializedDepNodeIndex = nodes.push(dep_node);
+ // Decode the header for this node; the header packs together as many of the fixed-size
+ // fields as possible to limit the number of times we update decoder state.
+ let node_header =
+ SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };
+
+ let _i: SerializedDepNodeIndex = nodes.push(node_header.node());
debug_assert_eq!(_i.index(), _index);
- let fingerprint: Fingerprint = Decodable::decode(d);
- let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint);
+ let _i: SerializedDepNodeIndex = fingerprints.push(node_header.fingerprint());
debug_assert_eq!(_i.index(), _index);
- // Deserialize edges -- sequence of DepNodeIndex
- let len = d.read_usize();
- let start = edge_list_data.len().try_into().unwrap();
- for _ in 0..len {
- let edge = Decodable::decode(d);
- edge_list_data.push(edge);
- }
- let end = edge_list_data.len().try_into().unwrap();
- let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end));
+ // If the length of this node's edge list is small, the length is stored in the header.
+ // If it is not, we fall back to another decoder call.
+ let num_edges = node_header.len().unwrap_or_else(|| d.read_usize());
+
+ // The edges index list uses the same varint strategy as rmeta tables; we select the
+ // byte width per-array, not per-element. This lets us read the whole edge
+ // list for a node with one decoder call and also use the on-disk format in memory.
+ let edges_len_bytes = node_header.bytes_per_index() * num_edges;
+ // The in-memory structure for the edges list stores the byte width of the edges on
+ // this node with the offset into the global edge data array.
+ let edges_header = node_header.edges_header(&edge_list_data);
+
+ edge_list_data.extend(d.read_raw_bytes(edges_len_bytes));
+
+ let _i: SerializedDepNodeIndex = edge_list_indices.push(edges_header);
debug_assert_eq!(_i.index(), _index);
}
- let index: FxHashMap<_, _> =
- nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
+ // When we access the edge list data, we do a fixed-size read from the edge list data then
+ // mask off the bytes that aren't for that edge index, so the last read may dangle off the
+ // end of the array. This padding ensures it doesn't.
+ edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
+
+ // Read the number of nodes of each dep kind and use it to create hash maps of a suitable size.
+ let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
+ .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
+ .collect();
+
+ for (idx, node) in nodes.iter_enumerated() {
+ index[node.kind.as_usize()].insert(node.hash, idx);
+ }
SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
}
}
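The per-kind index built at the end of `decode` is essentially a vector of fingerprint-to-index maps. A sketch with `std` types (`HashMap` standing in for the no-op-hashing `UnhashMap`, and `u64`/`u32` standing in for the fingerprint and index newtypes):

    use std::collections::HashMap;

    fn main() {
        // One map per dep kind, keyed by the node's fingerprint hash.
        let dep_kind_count = 3usize;
        let mut index: Vec<HashMap<u64, u32>> =
            (0..dep_kind_count).map(|_| HashMap::new()).collect();

        // (kind, hash) pairs as they would come out of the decoded `nodes` vector.
        let nodes = [(0usize, 0xdead_beefu64), (2, 0x1234), (0, 0xfeed)];
        for (node_index, &(kind, hash)) in nodes.iter().enumerate() {
            index[kind].insert(hash, node_index as u32);
        }

        // node_to_index_opt then becomes: pick the map for the kind, look up the hash.
        let lookup = |kind: usize, hash: u64| -> Option<u32> {
            index.get(kind)?.get(&hash).copied()
        };
        assert_eq!(lookup(2, 0x1234), Some(1));
        assert_eq!(lookup(1, 0x1234), None);
    }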
-#[derive(Debug, Encodable, Decodable)]
-pub struct NodeInfo<K: DepKind> {
- node: DepNode<K>,
+/// A packed representation of all the fixed-size fields in a `NodeInfo`.
+///
+/// This stores in one byte array:
+/// * The `Fingerprint` in the `NodeInfo`
+/// * The `Fingerprint` in `DepNode` that is in this `NodeInfo`
+/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
+/// * The byte width of the encoded edges for this node
+/// * In whatever bits remain, the length of the edge list for this node, if it fits
+struct SerializedNodeHeader<D> {
+ // 2 bytes for the packed DepKind, byte width, and (optional) edge-list length
+ // 16 for Fingerprint in DepNode
+ // 16 for Fingerprint in NodeInfo
+ bytes: [u8; 34],
+ _marker: PhantomData<D>,
+}
+
+// The fields of a `SerializedNodeHeader`. This struct is an implementation detail and exists only
+// to make the implementation of `SerializedNodeHeader` simpler.
+struct Unpacked {
+ len: Option<usize>,
+ bytes_per_index: usize,
+ kind: DepKind,
+ hash: PackedFingerprint,
fingerprint: Fingerprint,
- edges: SmallVec<[DepNodeIndex; 8]>,
}
-struct Stat<K: DepKind> {
- kind: K,
+// Bit fields, where
+// K: bits used to store the DepKind discriminant
+// N: bits used to store the byte width of elements of the edge list
+// M: bits used to store the length of a node's edge list
+// are laid out, starting from the least significant bit, as
+// 0..K     kind
+// K..K+N   bytes per index
+// K+N..16  length of the edge list (stored as length + 1; 0 means it did not fit)
+impl<D: Deps> SerializedNodeHeader<D> {
+ const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
+ const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
+ const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
+ const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
+ const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
+
+ #[inline]
+ fn new(node_info: &NodeInfo) -> Self {
+ debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);
+
+ let NodeInfo { node, fingerprint, edges } = node_info;
+
+ let mut head = node.kind.as_inner();
+
+ let free_bytes = edges.max_index().leading_zeros() as usize / 8;
+ let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
+ head |= (bytes_per_index as u16) << Self::KIND_BITS;
+
+ // Encode number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
+ // in this bitfield.
+ if edges.len() <= Self::MAX_INLINE_LEN {
+ head |= (edges.len() as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
+ }
+
+ let hash: Fingerprint = node.hash.into();
+
+ // Using half-open ranges ensures an unconditional panic if we get the magic numbers wrong.
+ let mut bytes = [0u8; 34];
+ bytes[..2].copy_from_slice(&head.to_le_bytes());
+ bytes[2..18].copy_from_slice(&hash.to_le_bytes());
+ bytes[18..].copy_from_slice(&fingerprint.to_le_bytes());
+
+ #[cfg(debug_assertions)]
+ {
+ let res = Self { bytes, _marker: PhantomData };
+ assert_eq!(node_info.fingerprint, res.fingerprint());
+ assert_eq!(node_info.node, res.node());
+ if let Some(len) = res.len() {
+ assert_eq!(node_info.edges.len(), len);
+ }
+ }
+ Self { bytes, _marker: PhantomData }
+ }
+
+ #[inline]
+ fn unpack(&self) -> Unpacked {
+ let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
+ let hash = self.bytes[2..18].try_into().unwrap();
+ let fingerprint = self.bytes[18..].try_into().unwrap();
+
+ let kind = head & mask(Self::KIND_BITS) as u16;
+ let bytes_per_index = (head >> Self::KIND_BITS) & mask(Self::WIDTH_BITS) as u16;
+ let len = (head as usize) >> (Self::WIDTH_BITS + Self::KIND_BITS);
+
+ Unpacked {
+ len: len.checked_sub(1),
+ bytes_per_index: bytes_per_index as usize + 1,
+ kind: DepKind::new(kind),
+ hash: Fingerprint::from_le_bytes(hash).into(),
+ fingerprint: Fingerprint::from_le_bytes(fingerprint),
+ }
+ }
+
+ #[inline]
+ fn len(&self) -> Option<usize> {
+ self.unpack().len
+ }
+
+ #[inline]
+ fn bytes_per_index(&self) -> usize {
+ self.unpack().bytes_per_index
+ }
+
+ #[inline]
+ fn fingerprint(&self) -> Fingerprint {
+ self.unpack().fingerprint
+ }
+
+ #[inline]
+ fn node(&self) -> DepNode {
+ let Unpacked { kind, hash, .. } = self.unpack();
+ DepNode { kind, hash }
+ }
+
+ #[inline]
+ fn edges_header(&self, edge_list_data: &[u8]) -> EdgeHeader {
+ EdgeHeader {
+ repr: (edge_list_data.len() << DEP_NODE_WIDTH_BITS) | (self.bytes_per_index() - 1),
+ }
+ }
+}
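A self-contained sketch of the 16-bit head this impl packs and unpacks; the bit budget below (7 kind bits, 2 width bits, 7 length bits) is chosen for illustration only, since the real split depends on `D::DEP_KIND_MAX`:

    fn main() {
        // Illustrative bit budget: 7 + 2 + 7 = 16.
        const KIND_BITS: u16 = 7;
        const WIDTH_BITS: u16 = 2;
        const LEN_BITS: u16 = 16 - KIND_BITS - WIDTH_BITS;
        const MAX_INLINE_LEN: u16 = (1 << LEN_BITS) - 2; // reserve 0 for "doesn't fit"

        fn pack(kind: u16, bytes_per_index: u16, len: usize) -> u16 {
            let mut head = kind;
            head |= (bytes_per_index - 1) << KIND_BITS;
            if len <= MAX_INLINE_LEN as usize {
                head |= (len as u16 + 1) << (KIND_BITS + WIDTH_BITS);
            }
            head
        }

        fn unpack(head: u16) -> (u16, u16, Option<usize>) {
            let kind = head & ((1 << KIND_BITS) - 1);
            let bytes_per_index = ((head >> KIND_BITS) & ((1 << WIDTH_BITS) - 1)) + 1;
            let len = (head >> (KIND_BITS + WIDTH_BITS)) as usize;
            (kind, bytes_per_index, len.checked_sub(1))
        }

        // Small edge list: the length is carried inline.
        assert_eq!(unpack(pack(42, 3, 5)), (42, 3, Some(5)));
        // Oversized edge list: the length field is 0, i.e. "read it separately".
        assert_eq!(unpack(pack(42, 3, 500)), (42, 3, None));
    }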
+
+#[derive(Debug)]
+struct NodeInfo {
+ node: DepNode,
+ fingerprint: Fingerprint,
+ edges: EdgesVec,
+}
+
+impl NodeInfo {
+ fn encode<D: Deps>(&self, e: &mut FileEncoder) {
+ let header = SerializedNodeHeader::<D>::new(self);
+ e.write_array(header.bytes);
+
+ if header.len().is_none() {
+ e.emit_usize(self.edges.len());
+ }
+
+ let bytes_per_index = header.bytes_per_index();
+ for node_index in self.edges.iter() {
+ e.write_with(|dest| {
+ *dest = node_index.as_u32().to_le_bytes();
+ bytes_per_index
+ });
+ }
+ }
+}
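On the encode side, writing an index at a reduced width amounts to truncating its little-endian bytes, which is what the `write_with` call above does through `FileEncoder`. A plain-`Vec` sketch of that truncation:

    fn main() {
        let edges: [u32; 3] = [7, 300, 65_000];
        // Width chosen from the largest index: 65_000 needs 2 bytes.
        let bytes_per_index =
            (4 - (edges.iter().max().unwrap().leading_zeros() as usize / 8)).max(1);
        assert_eq!(bytes_per_index, 2);

        let mut out = Vec::new();
        for ix in edges {
            // Little-endian means the low-order bytes come first, so truncation is
            // lossless as long as the index fits in `bytes_per_index` bytes.
            out.extend_from_slice(&ix.to_le_bytes()[..bytes_per_index]);
        }
        assert_eq!(out.len(), edges.len() * bytes_per_index);
        println!("{out:?}");
    }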
+
+struct Stat {
+ kind: DepKind,
node_counter: u64,
edge_counter: u64,
}
-struct EncoderState<K: DepKind> {
+struct EncoderState<D: Deps> {
encoder: FileEncoder,
total_node_count: usize,
total_edge_count: usize,
- stats: Option<FxHashMap<K, Stat<K>>>,
+ stats: Option<FxHashMap<DepKind, Stat>>,
+
+ /// Stores the number of times we've encoded each dep kind.
+ kind_stats: Vec<u32>,
+ marker: PhantomData<D>,
}
-impl<K: DepKind> EncoderState<K> {
+impl<D: Deps> EncoderState<D> {
fn new(encoder: FileEncoder, record_stats: bool) -> Self {
Self {
encoder,
total_edge_count: 0,
total_node_count: 0,
stats: record_stats.then(FxHashMap::default),
+ kind_stats: iter::repeat(0).take(D::DEP_KIND_MAX as usize + 1).collect(),
+ marker: PhantomData,
}
}
fn encode_node(
&mut self,
- node: &NodeInfo<K>,
- record_graph: &Option<Lock<DepGraphQuery<K>>>,
+ node: &NodeInfo,
+ record_graph: &Option<Lock<DepGraphQuery>>,
) -> DepNodeIndex {
let index = DepNodeIndex::new(self.total_node_count);
self.total_node_count += 1;
+ self.kind_stats[node.node.kind.as_usize()] += 1;
let edge_count = node.edges.len();
self.total_edge_count += edge_count;
@@ -197,16 +475,28 @@ impl<K: DepKind> EncoderState<K> {
}
let encoder = &mut self.encoder;
- node.encode(encoder);
+ node.encode::<D>(encoder);
index
}
fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
- let Self { mut encoder, total_node_count, total_edge_count, stats: _ } = self;
+ let Self {
+ mut encoder,
+ total_node_count,
+ total_edge_count,
+ stats: _,
+ kind_stats,
+ marker: _,
+ } = self;
let node_count = total_node_count.try_into().unwrap();
let edge_count = total_edge_count.try_into().unwrap();
+ // Encode the number of nodes of each dep kind encountered
+ for count in kind_stats.iter() {
+ count.encode(&mut encoder);
+ }
+
debug!(?node_count, ?edge_count);
debug!("position: {:?}", encoder.position());
IntEncodedWithFixedSize(node_count).encode(&mut encoder);
@@ -223,12 +513,12 @@ impl<K: DepKind> EncoderState<K> {
}
}
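`finish` leaves the two totals in a fixed-size trailer so that `decode` can read them from the end of the file before anything else. A sketch of the same layout with plain byte buffers (little-endian `u64`s standing in for `IntEncodedWithFixedSize`):

    fn main() {
        // Writer: payload first, then a fixed-size footer with the two counts.
        let payload = b"...node and edge records...".to_vec();
        let (node_count, edge_count) = (3u64, 7u64);

        let mut file = payload.clone();
        file.extend_from_slice(&node_count.to_le_bytes());
        file.extend_from_slice(&edge_count.to_le_bytes());

        // Reader: because the footer has a fixed size, it can be located from the
        // end of the file before anything else is decoded.
        let footer_start = file.len() - 16;
        let nodes = u64::from_le_bytes(file[footer_start..footer_start + 8].try_into().unwrap());
        let edges = u64::from_le_bytes(file[footer_start + 8..].try_into().unwrap());

        assert_eq!((nodes, edges), (node_count, edge_count));
        assert_eq!(&file[..footer_start], &payload[..]);
    }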
-pub struct GraphEncoder<K: DepKind> {
- status: Lock<EncoderState<K>>,
- record_graph: Option<Lock<DepGraphQuery<K>>>,
+pub struct GraphEncoder<D: Deps> {
+ status: Lock<EncoderState<D>>,
+ record_graph: Option<Lock<DepGraphQuery>>,
}
-impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
+impl<D: Deps> GraphEncoder<D> {
pub fn new(
encoder: FileEncoder,
prev_node_count: usize,
@@ -240,7 +530,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
GraphEncoder { status, record_graph }
}
- pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(record_graph) = &self.record_graph {
f(&record_graph.lock())
}
@@ -301,9 +591,9 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
pub(crate) fn send(
&self,
profiler: &SelfProfilerRef,
- node: DepNode<K>,
+ node: DepNode,
fingerprint: Fingerprint,
- edges: SmallVec<[DepNodeIndex; 8]>,
+ edges: EdgesVec,
) -> DepNodeIndex {
let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
let node = NodeInfo { node, fingerprint, edges };
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
index e673d5b8c..b2177be0e 100644
--- a/compiler/rustc_query_system/src/ich/impls_syntax.rs
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -5,7 +5,7 @@ use crate::ich::StableHashingContext;
use rustc_ast as ast;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_span::{BytePos, NormalizedPos, SourceFile};
+use rustc_span::SourceFile;
use std::assert_matches::assert_matches;
use smallvec::SmallVec;
@@ -67,8 +67,8 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
src: _,
ref src_hash,
external_src: _,
- start_pos,
- end_pos: _,
+ start_pos: _,
+ source_len: _,
lines: _,
ref multibyte_chars,
ref non_narrow_chars,
@@ -79,62 +79,37 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
src_hash.hash_stable(hcx, hasher);
- // We are always in `Lines` form by the time we reach here.
- assert!(self.lines.borrow().is_lines());
- self.lines(|lines| {
+ {
+ // We are always in `Lines` form by the time we reach here.
+ assert!(self.lines.read().is_lines());
+ let lines = self.lines();
// We only hash the relative position within this source_file
lines.len().hash_stable(hcx, hasher);
for &line in lines.iter() {
- stable_byte_pos(line, start_pos).hash_stable(hcx, hasher);
+ line.hash_stable(hcx, hasher);
}
- });
+ }
// We only hash the relative position within this source_file
multibyte_chars.len().hash_stable(hcx, hasher);
for &char_pos in multibyte_chars.iter() {
- stable_multibyte_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
non_narrow_chars.len().hash_stable(hcx, hasher);
for &char_pos in non_narrow_chars.iter() {
- stable_non_narrow_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
normalized_pos.len().hash_stable(hcx, hasher);
for &char_pos in normalized_pos.iter() {
- stable_normalized_pos(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
cnum.hash_stable(hcx, hasher);
}
}
-fn stable_byte_pos(pos: BytePos, source_file_start: BytePos) -> u32 {
- pos.0 - source_file_start.0
-}
-
-fn stable_multibyte_char(mbc: rustc_span::MultiByteChar, source_file_start: BytePos) -> (u32, u32) {
- let rustc_span::MultiByteChar { pos, bytes } = mbc;
-
- (pos.0 - source_file_start.0, bytes as u32)
-}
-
-fn stable_non_narrow_char(
- swc: rustc_span::NonNarrowChar,
- source_file_start: BytePos,
-) -> (u32, u32) {
- let pos = swc.pos();
- let width = swc.width();
-
- (pos.0 - source_file_start.0, width as u32)
-}
-
-fn stable_normalized_pos(np: NormalizedPos, source_file_start: BytePos) -> (u32, u32) {
- let NormalizedPos { pos, diff } = np;
-
- (pos.0 - source_file_start.0, diff)
-}
-
impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
// Unfortunately we cannot exhaustively list fields here, since the
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 8c9e9cfad..1944ac443 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -4,6 +4,7 @@
#![feature(min_specialization)]
#![feature(extern_types)]
#![feature(let_chains)]
+#![feature(inline_const)]
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4ba9d53a9..0240f012d 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -2,7 +2,7 @@ use crate::dep_graph::DepNodeIndex;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::{self, Sharded};
-use rustc_data_structures::sync::Lock;
+use rustc_data_structures::sync::OnceLock;
use rustc_index::{Idx, IndexVec};
use std::fmt::Debug;
use std::hash::Hash;
@@ -55,7 +55,7 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
let key_hash = sharded::make_hash(key);
- let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ let lock = self.cache.lock_shard_by_hash(key_hash);
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
if let Some((_, value)) = result { Some(*value) } else { None }
@@ -63,15 +63,14 @@ where
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- let mut lock = self.cache.get_shard_by_value(&key).lock();
+ let mut lock = self.cache.lock_shard_by_value(&key);
// We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
+ for shard in self.cache.lock_shards() {
for (k, v) in shard.iter() {
f(k, &v.0, v.1);
}
@@ -88,12 +87,12 @@ impl<'tcx, V: 'tcx> CacheSelector<'tcx, V> for SingleCacheSelector {
}
pub struct SingleCache<V> {
- cache: Lock<Option<(V, DepNodeIndex)>>,
+ cache: OnceLock<(V, DepNodeIndex)>,
}
impl<V> Default for SingleCache<V> {
fn default() -> Self {
- SingleCache { cache: Lock::new(None) }
+ SingleCache { cache: OnceLock::new() }
}
}
@@ -106,16 +105,16 @@ where
#[inline(always)]
fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
- *self.cache.lock()
+ self.cache.get().copied()
}
#[inline]
fn complete(&self, _key: (), value: V, index: DepNodeIndex) {
- *self.cache.lock() = Some((value, index));
+ self.cache.set((value, index)).ok();
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- if let Some(value) = self.cache.lock().as_ref() {
+ if let Some(value) = self.cache.get() {
f(&(), &value.0, value.1)
}
}
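The `SingleCache` change above replaces a mutex around an `Option` with a write-once cell. A sketch of the same access pattern using `std::sync::OnceLock` (assuming the in-tree `OnceLock` behaves like the standard-library one):

    use std::sync::OnceLock;

    fn main() {
        let cache: OnceLock<(u32, u32)> = OnceLock::new();

        // lookup() before anything was computed: nothing there.
        assert_eq!(cache.get().copied(), None);

        // complete(): the first writer wins, later writes are ignored instead of racing.
        cache.set((42, 7)).ok();
        cache.set((99, 1)).ok();

        // lookup() afterwards always observes the first value.
        assert_eq!(cache.get().copied(), Some((42, 7)));
    }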
@@ -149,19 +148,18 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
- let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+ let lock = self.cache.lock_shard_by_hash(key.index() as u64);
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
}
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+ let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
+ for shard in self.cache.lock_shards() {
for (k, v) in shard.iter_enumerated() {
if let Some(v) = v {
f(&k, &v.0, v.1);
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 7e47d7012..c025fac26 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -1,6 +1,6 @@
//! Query configuration and description traits.
-use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
@@ -8,6 +8,7 @@ use crate::query::DepNodeIndex;
use crate::query::{QueryContext, QueryInfo, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_span::ErrorGuaranteed;
use std::fmt::Debug;
use std::hash::Hash;
@@ -26,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn format_value(self) -> fn(&Self::Value) -> String;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+ fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
where
Qcx: 'a;
@@ -56,7 +57,8 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn value_from_cycle_error(
self,
tcx: Qcx::DepContext,
- cycle: &[QueryInfo<Qcx::DepKind>],
+ cycle: &[QueryInfo],
+ guar: ErrorGuaranteed,
) -> Self::Value;
fn anon(self) -> bool;
@@ -64,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn depth_limit(self) -> bool;
fn feedable(self) -> bool;
- fn dep_kind(self) -> Qcx::DepKind;
+ fn dep_kind(self) -> DepKind;
fn handle_cycle_error(self) -> HandleCycleError;
fn hash_result(self) -> HashResult<Self::Value>;
// Just here for convenience and checking that the key matches the kind, don't override this.
- fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+ fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
DepNode::construct(tcx, self.dep_kind(), key)
}
}
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 1b1248924..f2c1f84fc 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,9 +1,8 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
+use crate::query::DepKind;
use crate::query::{QueryContext, QueryStackFrame};
-use core::marker::PhantomData;
-
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@@ -30,48 +29,48 @@ use {
/// Represents a span and a query key.
#[derive(Clone, Debug)]
-pub struct QueryInfo<D: DepKind> {
+pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
- pub query: QueryStackFrame<D>,
+ pub query: QueryStackFrame,
}
-pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId {
- fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
+ fn query(self, map: &QueryMap) -> QueryStackFrame {
map.get(&self).unwrap().query.clone()
}
#[cfg(parallel_compiler)]
- fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
+ fn span(self, map: &QueryMap) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
- fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
+ fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
- fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
+ fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
#[derive(Clone)]
-pub struct QueryJobInfo<D: DepKind> {
- pub query: QueryStackFrame<D>,
- pub job: QueryJob<D>,
+pub struct QueryJobInfo {
+ pub query: QueryStackFrame,
+ pub job: QueryJob,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob<D: DepKind> {
+pub struct QueryJob {
pub id: QueryJobId,
/// The span corresponding to the reason for which this query was required.
@@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch<D>>,
- spooky: core::marker::PhantomData<D>,
+ latch: Option<QueryLatch>,
}
-impl<D: DepKind> QueryJob<D> {
+impl QueryJob {
/// Creates a new query job.
#[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
parent,
#[cfg(parallel_compiler)]
latch: None,
- spooky: PhantomData,
}
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self) -> QueryLatch<D> {
+ pub(super) fn latch(&mut self) -> QueryLatch {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
@@ -124,13 +121,12 @@ impl<D: DepKind> QueryJob<D> {
}
impl QueryJobId {
- #[cfg(not(parallel_compiler))]
- pub(super) fn find_cycle_in_stack<D: DepKind>(
+ pub(super) fn find_cycle_in_stack(
&self,
- query_map: QueryMap<D>,
+ query_map: QueryMap,
current_job: &Option<QueryJobId>,
span: Span,
- ) -> CycleError<D> {
+ ) -> CycleError {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
@@ -164,18 +160,18 @@ impl QueryJobId {
#[cold]
#[inline(never)]
- pub fn try_find_layout_root<D: DepKind>(
+ pub fn try_find_layout_root(
&self,
- query_map: QueryMap<D>,
- ) -> Option<(QueryJobInfo<D>, usize)> {
+ query_map: QueryMap,
+ layout_of_kind: DepKind,
+ ) -> Option<(QueryJobInfo, usize)> {
let mut last_layout = None;
let mut current_id = Some(*self);
let mut depth = 0;
while let Some(id) = current_id {
let info = query_map.get(&id).unwrap();
- // FIXME: This string comparison should probably not be done.
- if format!("{:?}", info.query.dep_kind) == "layout_of" {
+ if info.query.dep_kind == layout_of_kind {
depth += 1;
last_layout = Some((info.clone(), depth));
}
@@ -186,15 +182,15 @@ impl QueryJobId {
}
#[cfg(parallel_compiler)]
-struct QueryWaiter<D: DepKind> {
+struct QueryWaiter {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
- cycle: Mutex<Option<CycleError<D>>>,
+ cycle: Mutex<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
-impl<D: DepKind> QueryWaiter<D> {
+impl QueryWaiter {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
@@ -202,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
}
#[cfg(parallel_compiler)]
-struct QueryLatchInfo<D: DepKind> {
+struct QueryLatchInfo {
complete: bool,
- waiters: Vec<Arc<QueryWaiter<D>>>,
+ waiters: Vec<Arc<QueryWaiter>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
-pub(super) struct QueryLatch<D: DepKind> {
- info: Arc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+ info: Arc<Mutex<QueryLatchInfo>>,
}
#[cfg(parallel_compiler)]
-impl<D: DepKind> QueryLatch<D> {
+impl QueryLatch {
fn new() -> Self {
QueryLatch {
info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -222,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
}
/// Awaits for the query job to complete.
- pub(super) fn wait_on(
- &self,
- query: Option<QueryJobId>,
- span: Span,
- ) -> Result<(), CycleError<D>> {
+ pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
let waiter =
Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
@@ -241,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
}
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
+ fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
+ fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@@ -297,14 +289,9 @@ type Waiter = (QueryJobId, usize);
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<F, D>(
- query_map: &QueryMap<D>,
- query: QueryJobId,
- mut visit: F,
-) -> Option<Option<Waiter>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
- D: DepKind,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
@@ -333,8 +320,8 @@ where
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check<D: DepKind>(
- query_map: &QueryMap<D>,
+fn cycle_check(
+ query_map: &QueryMap,
query: QueryJobId,
span: Span,
stack: &mut Vec<(Span, QueryJobId)>,
@@ -374,8 +361,8 @@ fn cycle_check<D: DepKind>(
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root<D: DepKind>(
- query_map: &QueryMap<D>,
+fn connected_to_root(
+ query_map: &QueryMap,
query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>,
) -> bool {
@@ -397,10 +384,9 @@ fn connected_to_root<D: DepKind>(
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
-fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
F: Fn(&T) -> (Span, QueryJobId),
- D: DepKind,
{
// Deterministically pick an entry point
// FIXME: Sort this instead
@@ -424,10 +410,10 @@ where
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
- query_map: &QueryMap<D>,
+fn remove_cycle(
+ query_map: &QueryMap,
jobs: &mut Vec<QueryJobId>,
- wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
+ wakelist: &mut Vec<Arc<QueryWaiter>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@@ -529,7 +515,7 @@ fn remove_cycle<D: DepKind>(
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
-pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
let on_panic = defer(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
@@ -553,7 +539,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
// which in turn will wait on X causing a deadlock. We have a false dependency from
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
- assert!(found_cycle);
+ if !found_cycle {
+ panic!("deadlock detected");
+ }
// FIXME: Ensure this won't cause a deadlock before we return
for waiter in wakelist.into_iter() {
@@ -565,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
#[inline(never)]
#[cold]
-pub(crate) fn report_cycle<'a, D: DepKind>(
+pub(crate) fn report_cycle<'a>(
sess: &'a Session,
- CycleError { usage, cycle: stack }: &CycleError<D>,
+ CycleError { usage, cycle: stack }: &CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
assert!(!stack.is_empty());
@@ -592,9 +580,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
});
}
- let alias = if stack
- .iter()
- .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+ let alias = if stack.iter().all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias)))
{
Some(crate::error::Alias::Ty)
} else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
@@ -654,8 +640,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
if let Some(ref mut file) = file {
let _ = writeln!(
file,
- "#{} [{:?}] {}",
- count_total, query_info.query.dep_kind, query_info.query.description
+ "#{} [{}] {}",
+ count_total,
+ qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
+ query_info.query.description
);
}
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index f7619d75b..05dee9f12 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -28,27 +28,27 @@ use thin_vec::ThinVec;
///
/// This is mostly used in case of cycles for error reporting.
#[derive(Clone, Debug)]
-pub struct QueryStackFrame<D: DepKind> {
+pub struct QueryStackFrame {
pub description: String,
span: Option<Span>,
pub def_id: Option<DefId>,
pub def_kind: Option<DefKind>,
pub ty_adt_id: Option<DefId>,
- pub dep_kind: D,
+ pub dep_kind: DepKind,
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
hash: Hash64,
}
-impl<D: DepKind> QueryStackFrame<D> {
+impl QueryStackFrame {
#[inline]
pub fn new(
description: String,
span: Option<Span>,
def_id: Option<DefId>,
def_kind: Option<DefKind>,
- dep_kind: D,
+ dep_kind: DepKind,
ty_adt_id: Option<DefId>,
_hash: impl FnOnce() -> Hash64,
) -> Self {
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context.
fn current_query_job(self) -> Option<QueryJobId>;
- fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
+ fn try_collect_active_jobs(self) -> Option<QueryMap>;
/// Load side effects associated to the node in the previous session.
fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4adb4eb74..ae8414ebb 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,8 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
-use crate::dep_graph::{DepGraphData, HasDepContext};
+use crate::dep_graph::DepGraphData;
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)]
@@ -14,10 +14,11 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
+use rustc_data_structures::{outline, sync};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
@@ -29,68 +30,40 @@ use thin_vec::ThinVec;
use super::QueryConfig;
-pub struct QueryState<K, D: DepKind> {
- #[cfg(parallel_compiler)]
- active: Sharded<FxHashMap<K, QueryResult<D>>>,
- #[cfg(not(parallel_compiler))]
- active: Lock<FxHashMap<K, QueryResult<D>>>,
+pub struct QueryState<K> {
+ active: Sharded<FxHashMap<K, QueryResult>>,
}
/// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D: DepKind> {
+enum QueryResult {
/// An already executing query. The query job can be used to await for its completion.
- Started(QueryJob<D>),
+ Started(QueryJob),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
-impl<K, D> QueryState<K, D>
+impl<K> QueryState<K>
where
K: Eq + Hash + Copy + Debug,
- D: DepKind,
{
pub fn all_inactive(&self) -> bool {
- #[cfg(parallel_compiler)]
- {
- let shards = self.active.lock_shards();
- shards.iter().all(|shard| shard.is_empty())
- }
- #[cfg(not(parallel_compiler))]
- {
- self.active.lock().is_empty()
- }
+ self.active.lock_shards().all(|shard| shard.is_empty())
}
pub fn try_collect_active_jobs<Qcx: Copy>(
&self,
qcx: Qcx,
- make_query: fn(Qcx, K) -> QueryStackFrame<D>,
- jobs: &mut QueryMap<D>,
+ make_query: fn(Qcx, K) -> QueryStackFrame,
+ jobs: &mut QueryMap,
) -> Option<()> {
let mut active = Vec::new();
- #[cfg(parallel_compiler)]
- {
- // We use try_lock_shards here since we are called from the
- // deadlock handler, and this shouldn't be locked.
- let shards = self.active.try_lock_shards()?;
- for shard in shards.iter() {
- for (k, v) in shard.iter() {
- if let QueryResult::Started(ref job) = *v {
- active.push((*k, job.clone()));
- }
- }
- }
- }
- #[cfg(not(parallel_compiler))]
- {
- // We use try_lock here since we are called from the
- // deadlock handler, and this shouldn't be locked.
- // (FIXME: Is this relevant for non-parallel compilers? It doesn't
- // really hurt much.)
- for (k, v) in self.active.try_lock()?.iter() {
+ // We use try_lock_shards here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ for shard in self.active.try_lock_shards() {
+ for (k, v) in shard?.iter() {
if let QueryResult::Started(ref job) = *v {
active.push((*k, job.clone()));
}
@@ -108,25 +81,25 @@ where
}
}
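The `try_lock_shards` comment above is about not blocking inside the deadlock handler: a thread that might already hold the lock can only safely make a non-blocking attempt. A toy illustration with a plain `std::sync::Mutex` rather than the sharded lock:

    use std::sync::Mutex;

    fn main() {
        let active = Mutex::new(vec!["typeck", "borrowck"]);

        // Something else (possibly this very thread) holds the lock...
        let guard = active.lock().unwrap();

        // ...so a handler that must not block gives up instead of deadlocking.
        match active.try_lock() {
            Ok(jobs) => println!("collected {} active jobs", jobs.len()),
            Err(_) => println!("lock unavailable, skip collecting jobs"),
        }

        drop(guard);
        assert_eq!(active.try_lock().unwrap().len(), 2);
    }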
-impl<K, D: DepKind> Default for QueryState<K, D> {
- fn default() -> QueryState<K, D> {
+impl<K> Default for QueryState<K> {
+ fn default() -> QueryState<K> {
QueryState { active: Default::default() }
}
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K, D: DepKind>
+struct JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
- state: &'tcx QueryState<K, D>,
+ state: &'tcx QueryState<K>,
key: K,
}
#[cold]
#[inline(never)]
-fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
+fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
@@ -138,7 +111,7 @@ where
fn handle_cycle_error<Q, Qcx>(
query: Q,
qcx: Qcx,
- cycle_error: &CycleError<Qcx::DepKind>,
+ cycle_error: &CycleError,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
) -> Q::Value
where
@@ -148,8 +121,8 @@ where
use HandleCycleError::*;
match query.handle_cycle_error() {
Error => {
- error.emit();
- query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+ let guar = error.emit();
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
}
Fatal => {
error.emit();
@@ -157,13 +130,13 @@ where
unreachable!()
}
DelayBug => {
- error.delay_as_bug();
- query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+ let guar = error.delay_as_bug();
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
}
}
}
-impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
+impl<'tcx, K> JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
@@ -184,10 +157,7 @@ where
cache.complete(key, result, dep_node_index);
let job = {
- #[cfg(parallel_compiler)]
- let mut lock = state.active.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut lock = state.active.lock();
+ let mut lock = state.active.lock_shard_by_value(&key);
match lock.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -198,10 +168,9 @@ where
}
}
-impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
- D: DepKind,
{
#[inline(never)]
#[cold]
@@ -209,10 +178,7 @@ where
// Poison the query so jobs waiting on it panic.
let state = self.state;
let job = {
- #[cfg(parallel_compiler)]
- let mut shard = state.active.get_shard_by_value(&self.key).lock();
- #[cfg(not(parallel_compiler))]
- let mut shard = state.active.lock();
+ let mut shard = state.active.lock_shard_by_value(&self.key);
let job = match shard.remove(&self.key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -227,10 +193,10 @@ where
}
#[derive(Clone)]
-pub(crate) struct CycleError<D: DepKind> {
+pub(crate) struct CycleError {
/// The query and related span that uses the cycle.
- pub usage: Option<(Span, QueryStackFrame<D>)>,
- pub cycle: Vec<QueryInfo<D>>,
+ pub usage: Option<(Span, QueryStackFrame)>,
+ pub cycle: Vec<QueryInfo>,
}
/// Checks if the query is already computed and in the cache.
@@ -255,7 +221,6 @@ where
#[cold]
#[inline(never)]
-#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
query: Q,
qcx: Qcx,
@@ -281,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
qcx: Qcx,
span: Span,
key: Q::Key,
- latch: QueryLatch<Qcx::DepKind>,
+ latch: QueryLatch,
current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
@@ -300,7 +265,18 @@ where
match result {
Ok(()) => {
let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
- cold_path(|| panic!("value must be in cache after waiting"))
+ outline(|| {
+ // We didn't find the query result in the query cache. Check if it was
+ // poisoned due to a panic instead.
+ let lock = query.query_state(qcx).active.get_shard_by_value(&key).lock();
+ match lock.get(&key) {
+ // The query we waited on panicked. Continue unwinding here.
+ Some(QueryResult::Poisoned) => FatalError.raise(),
+ _ => panic!(
+ "query result must be in the cache or the query must be poisoned after a wait"
+ ),
+ }
+ })
};
qcx.dep_context().profiler().query_cache_hit(index.into());
@@ -318,17 +294,14 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
qcx: Qcx,
span: Span,
key: Q::Key,
- dep_node: Option<DepNode<Qcx::DepKind>>,
+ dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
let state = query.query_state(qcx);
- #[cfg(parallel_compiler)]
- let mut state_lock = state.active.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut state_lock = state.active.lock();
+ let mut state_lock = state.active.lock_shard_by_value(&key);
// For the parallel compiler we need to check both the query cache and query state structures
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -360,8 +333,18 @@ where
}
Entry::Occupied(mut entry) => {
match entry.get_mut() {
- #[cfg(not(parallel_compiler))]
QueryResult::Started(job) => {
+ #[cfg(parallel_compiler)]
+ if sync::is_dyn_thread_safe() {
+ // Get the latch out
+ let latch = job.latch();
+ drop(state_lock);
+
+ // Only call `wait_for_query` if we're using a Rayon thread pool
+ // as it will attempt to mark the worker thread as blocked.
+ return wait_for_query(query, qcx, span, key, latch, current_job_id);
+ }
+
let id = job.id;
drop(state_lock);
@@ -369,14 +352,6 @@ where
// so we just return the error.
cycle_error(query, qcx, id, span)
}
- #[cfg(parallel_compiler)]
- QueryResult::Started(job) => {
- // Get the latch out
- let latch = job.latch();
- drop(state_lock);
-
- wait_for_query(query, qcx, span, key, latch, current_job_id)
- }
QueryResult::Poisoned => FatalError.raise(),
}
}
@@ -387,10 +362,10 @@ where
fn execute_job<Q, Qcx, const INCR: bool>(
query: Q,
qcx: Qcx,
- state: &QueryState<Q::Key, Qcx::DepKind>,
+ state: &QueryState<Q::Key>,
key: Q::Key,
id: QueryJobId,
- dep_node: Option<DepNode<Qcx::DepKind>>,
+ dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
@@ -497,9 +472,9 @@ where
fn execute_job_incr<Q, Qcx>(
query: Q,
qcx: Qcx,
- dep_graph_data: &DepGraphData<Qcx::DepKind>,
+ dep_graph_data: &DepGraphData<Qcx::Deps>,
key: Q::Key,
- mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+ mut dep_node_opt: Option<DepNode>,
job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
@@ -563,10 +538,10 @@ where
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
query: Q,
- dep_graph_data: &DepGraphData<Qcx::DepKind>,
+ dep_graph_data: &DepGraphData<Qcx::Deps>,
qcx: Qcx,
key: &Q::Key,
- dep_node: &DepNode<Qcx::DepKind>,
+ dep_node: &DepNode,
) -> Option<(Q::Value, DepNodeIndex)>
where
Q: QueryConfig<Qcx>,
@@ -660,7 +635,7 @@ where
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
tcx: Tcx,
- dep_graph_data: &DepGraphData<Tcx::DepKind>,
+ dep_graph_data: &DepGraphData<Tcx::Deps>,
result: &V,
prev_index: SerializedDepNodeIndex,
hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -753,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
qcx: Qcx,
key: &Q::Key,
check_cache: bool,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+) -> (bool, Option<DepNode>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
@@ -844,12 +819,8 @@ where
Some(result)
}
-pub fn force_query<Q, Qcx>(
- query: Q,
- qcx: Qcx,
- key: Q::Key,
- dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
-) where
+pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
+where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
index ce551078c..8848fda9d 100644
--- a/compiler/rustc_query_system/src/values.rs
+++ b/compiler/rustc_query_system/src/values.rs
@@ -1,12 +1,14 @@
-use crate::dep_graph::{DepContext, DepKind};
+use rustc_span::ErrorGuaranteed;
+
+use crate::dep_graph::DepContext;
use crate::query::QueryInfo;
-pub trait Value<Tcx: DepContext, D: DepKind>: Sized {
- fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> Self;
+pub trait Value<Tcx: DepContext>: Sized {
+ fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self;
}
-impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T {
- default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> T {
+impl<Tcx: DepContext, T> Value<Tcx> for T {
+ default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> T {
tcx.sess().abort_if_errors();
// Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
// non-trivial to define it earlier.
diff --git a/compiler/rustc_resolve/messages.ftl b/compiler/rustc_resolve/messages.ftl
index f98918cba..9a970f5db 100644
--- a/compiler/rustc_resolve/messages.ftl
+++ b/compiler/rustc_resolve/messages.ftl
@@ -58,15 +58,16 @@ resolve_cannot_determine_import_resolution =
cannot determine resolution for the import
.note = import resolution is stuck, try simplifying other imports
+resolve_cannot_determine_macro_resolution =
+ cannot determine resolution for the {$kind} `{$path}`
+ .note = import resolution is stuck, try simplifying macro imports
+
resolve_cannot_find_ident_in_this_scope =
cannot find {$expected} `{$ident}` in this scope
resolve_cannot_glob_import_possible_crates =
cannot glob-import all possible crates
-resolve_cannot_use_self_type_here =
- can't use `Self` here
-
resolve_change_import_binding =
you can use `as` to change the binding name of the import
@@ -86,9 +87,6 @@ resolve_const_not_member_of_trait =
const `{$const_}` is not a member of trait `{$trait_}`
.label = not a member of trait `{$trait_}`
-resolve_const_param_from_outer_fn =
- const parameter from outer function
-
resolve_const_param_in_enum_discriminant =
const parameters may not be used in enum discriminant values
@@ -115,10 +113,19 @@ resolve_forward_declared_generic_param =
generic parameters with a default cannot use forward declared identifiers
.label = defaulted generic parameters cannot be forward declared
-resolve_generic_params_from_outer_function =
- can't use generic parameters from outer function
- .label = use of generic parameter from outer function
- .suggestion = try using a local generic parameter instead
+resolve_generic_params_from_outer_item =
+ can't use generic parameters from outer item
+ .label = use of generic parameter from outer item
+ .refer_to_type_directly = refer to the type directly here instead
+ .suggestion = try introducing a local generic parameter here
+
+resolve_generic_params_from_outer_item_const_param = const parameter from outer item
+
+resolve_generic_params_from_outer_item_self_ty_alias = `Self` type implicitly declared here, by this `impl`
+
+resolve_generic_params_from_outer_item_self_ty_param = can't use `Self` here
+
+resolve_generic_params_from_outer_item_ty_param = type parameter from outer item
resolve_glob_import_doesnt_reexport =
glob import doesn't reexport anything because no candidate is public enough
@@ -273,9 +280,6 @@ resolve_type_not_member_of_trait =
type `{$type_}` is not a member of trait `{$trait_}`
.label = not a member of trait `{$trait_}`
-resolve_type_param_from_outer_fn =
- type parameter from outer function
-
resolve_type_param_in_enum_discriminant =
type parameters may not be used in enum discriminant values
@@ -311,9 +315,6 @@ resolve_unreachable_label_suggestion_use_similarly_named =
resolve_unreachable_label_with_similar_name_exists =
a label with a similar name exists but is unreachable
-resolve_use_a_type_here_instead =
- use a type here instead
-
resolve_variable_bound_with_different_mode =
variable `{$variable_name}` is bound inconsistently across alternatives separated by `|`
.label = bound in different ways
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
index 127bec22c..a18109574 100644
--- a/compiler/rustc_resolve/src/build_reduced_graph.rs
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -247,8 +247,6 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
})
}
ast::VisibilityKind::Restricted { ref path, id, .. } => {
- // Make `PRIVATE_IN_PUBLIC` lint a hard error.
- self.r.has_pub_restricted = true;
// For visibilities we are not ready to provide correct implementation of "uniform
// paths" right now, so on 2018 edition we only allow module-relative paths for now.
// On 2015 edition visibilities are resolved as crate-relative by default,
@@ -700,10 +698,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
// These items live in the type namespace.
ItemKind::TyAlias(..) => {
- let res = Res::Def(
- DefKind::TyAlias { lazy: self.r.tcx.features().lazy_type_alias },
- def_id,
- );
+ let res = Res::Def(DefKind::TyAlias, def_id);
self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
}
@@ -870,10 +865,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
let imported_binding = self.r.import(binding, import);
if parent == self.r.graph_root {
if let Some(entry) = self.r.extern_prelude.get(&ident.normalize_to_macros_2_0()) {
- if expansion != LocalExpnId::ROOT
- && orig_name.is_some()
- && entry.extern_crate_item.is_none()
- {
+ if expansion != LocalExpnId::ROOT && orig_name.is_some() && !entry.is_import() {
let msg = "macro-expanded `extern crate` items cannot \
shadow names passed with `--extern`";
self.r.tcx.sess.span_err(item.span, msg);
@@ -884,10 +876,14 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
return;
}
}
- let entry = self.r.extern_prelude.entry(ident.normalize_to_macros_2_0()).or_insert(
- ExternPreludeEntry { extern_crate_item: None, introduced_by_item: true },
- );
- entry.extern_crate_item = Some(imported_binding);
+ let entry = self
+ .r
+ .extern_prelude
+ .entry(ident.normalize_to_macros_2_0())
+ .or_insert(ExternPreludeEntry { binding: None, introduced_by_item: true });
+ // A binding from an `extern crate` item in source code can replace
+ // a binding from `--extern` on the command line here.
+ entry.binding = Some(imported_binding);
if orig_name.is_some() {
entry.introduced_by_item = true;
}
@@ -951,7 +947,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
DefKind::Struct
| DefKind::Union
| DefKind::Variant
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::OpaqueTy
| DefKind::TraitAlias
@@ -1240,7 +1236,7 @@ impl<'a, 'b, 'tcx> BuildReducedGraphVisitor<'a, 'b, 'tcx> {
use_span_with_attributes: span,
use_span: span,
root_span: span,
- span: span,
+ span,
module_path: Vec::new(),
vis: Cell::new(Some(vis)),
used: Cell::new(true),
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
index cd1a9b934..907a6b1c4 100644
--- a/compiler/rustc_resolve/src/diagnostics.rs
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -553,43 +553,40 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
resolution_error: ResolutionError<'a>,
) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
match resolution_error {
- ResolutionError::GenericParamsFromOuterFunction(outer_res, has_generic_params) => {
- let mut err = struct_span_err!(
- self.tcx.sess,
+ ResolutionError::GenericParamsFromOuterItem(outer_res, has_generic_params) => {
+ use errs::GenericParamsFromOuterItemLabel as Label;
+ let mut err = errs::GenericParamsFromOuterItem {
span,
- E0401,
- "can't use generic parameters from outer function",
- );
- err.span_label(span, "use of generic parameter from outer function");
+ label: None,
+ refer_to_type_directly: None,
+ sugg: None,
+ };
let sm = self.tcx.sess.source_map();
let def_id = match outer_res {
Res::SelfTyParam { .. } => {
- err.span_label(span, "can't use `Self` here");
- return err;
+ err.label = Some(Label::SelfTyParam(span));
+ return self.tcx.sess.create_err(err);
}
Res::SelfTyAlias { alias_to: def_id, .. } => {
- err.span_label(
- reduce_impl_span_to_impl_keyword(sm, self.def_span(def_id)),
- "`Self` type implicitly declared here, by this `impl`",
- );
- err.span_label(span, "use a type here instead");
- return err;
+ err.label = Some(Label::SelfTyAlias(reduce_impl_span_to_impl_keyword(
+ sm,
+ self.def_span(def_id),
+ )));
+ err.refer_to_type_directly = Some(span);
+ return self.tcx.sess.create_err(err);
}
Res::Def(DefKind::TyParam, def_id) => {
- err.span_label(self.def_span(def_id), "type parameter from outer function");
+ err.label = Some(Label::TyParam(self.def_span(def_id)));
def_id
}
Res::Def(DefKind::ConstParam, def_id) => {
- err.span_label(
- self.def_span(def_id),
- "const parameter from outer function",
- );
+ err.label = Some(Label::ConstParam(self.def_span(def_id)));
def_id
}
_ => {
bug!(
- "GenericParamsFromOuterFunction should only be used with \
+ "GenericParamsFromOuterItem should only be used with \
Res::SelfTyParam, Res::SelfTyAlias, DefKind::TyParam or \
DefKind::ConstParam"
);
@@ -597,9 +594,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
};
if let HasGenericParams::Yes(span) = has_generic_params {
- // Try to retrieve the span of the function signature and generate a new
- // message with a local type or const parameter.
- let sugg_msg = "try using a local generic parameter instead";
let name = self.tcx.item_name(def_id);
let (span, snippet) = if span.is_empty() {
let snippet = format!("<{name}>");
@@ -609,11 +603,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let snippet = format!("{name}, ");
(span, snippet)
};
- // Suggest the modification to the user
- err.span_suggestion(span, sugg_msg, snippet, Applicability::MaybeIncorrect);
+ err.sugg = Some(errs::GenericParamsFromOuterItemSugg { span, snippet });
}
- err
+ self.tcx.sess.create_err(err)
}
ResolutionError::NameAlreadyUsedInParameterList(name, first_use_span) => self
.tcx
@@ -1032,7 +1025,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
.get(&expn_id)
.into_iter()
.flatten()
- .map(|ident| TypoSuggestion::typo_from_ident(*ident, res)),
+ .map(|(ident, _)| TypoSuggestion::typo_from_ident(*ident, res)),
);
}
}
@@ -2603,7 +2596,9 @@ fn show_candidates(
);
if let [first, .., last] = &path[..] {
let sp = first.ident.span.until(last.ident.span);
- if sp.can_be_used_for_suggestions() {
+ // Our suggestion is empty, so make sure the span is not empty (or we'd ICE).
+ // Can happen for derive-generated spans.
+ if sp.can_be_used_for_suggestions() && !sp.is_empty() {
err.span_suggestion_verbose(
sp,
format!("if you import `{}`, refer to it directly", last.ident),
@@ -2753,7 +2748,13 @@ fn search_for_any_use_in_items(items: &[P<ast::Item>]) -> Option<Span> {
for item in items {
if let ItemKind::Use(..) = item.kind {
if is_span_suitable_for_use_injection(item.span) {
- return Some(item.span.shrink_to_lo());
+ let mut lo = item.span.lo();
+ for attr in &item.attrs {
+ if attr.span.eq_ctxt(item.span) {
+ lo = std::cmp::min(lo, attr.span.lo());
+ }
+ }
+ return Some(Span::new(lo, lo, item.span.ctxt(), item.span.parent()));
}
}
}
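The last hunk above moves the injection point for a suggested `use` item up to the earliest attribute attached to the first existing `use`, so the suggestion is not inserted between `#[attr]` lines and their item. A minimal standalone sketch of that offset arithmetic, assuming plain byte offsets instead of rustc `Span`s and ignoring the syntax-context check on attributes (the names below are made up for the sketch):

// Sketch only: rustc works with `Span`s and checks `attr.span.eq_ctxt(item.span)`.
fn use_injection_offset(item_start: usize, attr_starts: &[usize]) -> usize {
    // Pull the zero-width insertion point back to the earliest attribute, if any.
    attr_starts.iter().copied().fold(item_start, |lo, attr| lo.min(attr))
}

fn main() {
    // Item at byte 40 with attributes starting at bytes 20 and 30:
    // the insertion point ends up before the first attribute, at byte 20.
    assert_eq!(use_injection_offset(40, &[20, 30]), 20);
    // No attributes: the item's own start is used unchanged.
    assert_eq!(use_injection_offset(40, &[]), 40);
}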
diff --git a/compiler/rustc_resolve/src/errors.rs b/compiler/rustc_resolve/src/errors.rs
index e4b89c658..72ff959bb 100644
--- a/compiler/rustc_resolve/src/errors.rs
+++ b/compiler/rustc_resolve/src/errors.rs
@@ -33,6 +33,40 @@ pub(crate) struct CrateRootNamesMustBeNamedExplicitly(#[primary_span] pub(crate)
pub(crate) struct ResolutionError(#[primary_span] pub(crate) Span);
#[derive(Diagnostic)]
+#[diag(resolve_generic_params_from_outer_item, code = "E0401")]
+pub(crate) struct GenericParamsFromOuterItem {
+ #[primary_span]
+ #[label]
+ pub(crate) span: Span,
+ #[subdiagnostic]
+ pub(crate) label: Option<GenericParamsFromOuterItemLabel>,
+ #[label(resolve_refer_to_type_directly)]
+ pub(crate) refer_to_type_directly: Option<Span>,
+ #[subdiagnostic]
+ pub(crate) sugg: Option<GenericParamsFromOuterItemSugg>,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum GenericParamsFromOuterItemLabel {
+ #[label(resolve_generic_params_from_outer_item_self_ty_param)]
+ SelfTyParam(#[primary_span] Span),
+ #[label(resolve_generic_params_from_outer_item_self_ty_alias)]
+ SelfTyAlias(#[primary_span] Span),
+ #[label(resolve_generic_params_from_outer_item_ty_param)]
+ TyParam(#[primary_span] Span),
+ #[label(resolve_generic_params_from_outer_item_const_param)]
+ ConstParam(#[primary_span] Span),
+}
+
+#[derive(Subdiagnostic)]
+#[suggestion(resolve_suggestion, code = "{snippet}", applicability = "maybe-incorrect")]
+pub(crate) struct GenericParamsFromOuterItemSugg {
+ #[primary_span]
+ pub(crate) span: Span,
+ pub(crate) snippet: String,
+}
+
+#[derive(Diagnostic)]
#[diag(resolve_name_is_already_used_as_generic_parameter, code = "E0403")]
pub(crate) struct NameAlreadyUsedInParameterList {
#[primary_span]
@@ -655,6 +689,16 @@ pub(crate) struct CannotDetermineImportResolution {
}
#[derive(Diagnostic)]
+#[diag(resolve_cannot_determine_macro_resolution)]
+#[note]
+pub(crate) struct CannotDetermineMacroResolution {
+ #[primary_span]
+ pub(crate) span: Span,
+ pub(crate) kind: &'static str,
+ pub(crate) path: String,
+}
+
+#[derive(Diagnostic)]
#[diag(resolve_cannot_be_reexported_private, code = "E0364")]
pub(crate) struct CannotBeReexportedPrivate {
#[primary_span]
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
index 3bd9cea27..54388f80f 100644
--- a/compiler/rustc_resolve/src/ident.rs
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -1,7 +1,5 @@
use rustc_ast::{self as ast, NodeId};
-use rustc_feature::is_builtin_attr_name;
use rustc_hir::def::{DefKind, Namespace, NonMacroAttrKind, PartialRes, PerNS};
-use rustc_hir::PrimTy;
use rustc_middle::bug;
use rustc_middle::ty;
use rustc_session::lint::builtin::PROC_MACRO_DERIVE_RESOLUTION_FALLBACK;
@@ -9,7 +7,7 @@ use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_span::def_id::LocalDefId;
use rustc_span::hygiene::{ExpnId, ExpnKind, LocalExpnId, MacroKind, SyntaxContext};
use rustc_span::symbol::{kw, Ident};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::Span;
use crate::errors::{ParamKindInEnumDiscriminant, ParamKindInNonTrivialAnonConst};
use crate::late::{
@@ -423,32 +421,22 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
orig_ident.span.ctxt(),
|this, scope, use_prelude, ctxt| {
let ident = Ident::new(orig_ident.name, orig_ident.span.with_ctxt(ctxt));
- let ok = |res, span, arenas| {
- Ok((
- (res, Visibility::Public, span, LocalExpnId::ROOT).to_name_binding(arenas),
- Flags::empty(),
- ))
- };
let result = match scope {
Scope::DeriveHelpers(expn_id) => {
- if let Some(attr) = this
- .helper_attrs
- .get(&expn_id)
- .and_then(|attrs| attrs.iter().rfind(|i| ident == **i))
- {
- let binding = (
- Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper),
- Visibility::Public,
- attr.span,
- expn_id,
- )
- .to_name_binding(this.arenas);
+ if let Some(binding) = this.helper_attrs.get(&expn_id).and_then(|attrs| {
+ attrs.iter().rfind(|(i, _)| ident == *i).map(|(_, binding)| *binding)
+ }) {
Ok((binding, Flags::empty()))
} else {
Err(Determinacy::Determined)
}
}
Scope::DeriveHelpersCompat => {
+ // FIXME: Try running this logic earlier, to allocate name bindings for
+ // legacy derive helpers when creating an attribute invocation with
+ // following derives. Legacy derive helpers are not common, so it shouldn't
+ // affect performance. It should also allow removing the `derives`
+ // component from `ParentScope`.
let mut result = Err(Determinacy::Determined);
for derive in parent_scope.derives {
let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
@@ -461,11 +449,14 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
) {
Ok((Some(ext), _)) => {
if ext.helper_attrs.contains(&ident.name) {
- result = ok(
+ let binding = (
Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat),
+ Visibility::Public,
derive.span,
- this.arenas,
- );
+ LocalExpnId::ROOT,
+ )
+ .to_name_binding(this.arenas);
+ result = Ok((binding, Flags::empty()));
break;
}
}
@@ -562,17 +553,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
)),
}
}
- Scope::BuiltinAttrs => {
- if is_builtin_attr_name(ident.name) {
- ok(
- Res::NonMacroAttr(NonMacroAttrKind::Builtin(ident.name)),
- DUMMY_SP,
- this.arenas,
- )
- } else {
- Err(Determinacy::Determined)
- }
- }
+ Scope::BuiltinAttrs => match this.builtin_attrs_bindings.get(&ident.name) {
+ Some(binding) => Ok((*binding, Flags::empty())),
+ None => Err(Determinacy::Determined),
+ },
Scope::ExternPrelude => {
match this.extern_prelude_get(ident, finalize.is_some()) {
Some(binding) => Ok((binding, Flags::empty())),
@@ -581,8 +565,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
)),
}
}
- Scope::ToolPrelude => match this.registered_tools.get(&ident).cloned() {
- Some(ident) => ok(Res::ToolMod, ident.span, this.arenas),
+ Scope::ToolPrelude => match this.registered_tool_bindings.get(&ident) {
+ Some(binding) => Ok((*binding, Flags::empty())),
None => Err(Determinacy::Determined),
},
Scope::StdLibPrelude => {
@@ -603,8 +587,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
}
result
}
- Scope::BuiltinTypes => match PrimTy::from_name(ident.name) {
- Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas),
+ Scope::BuiltinTypes => match this.builtin_types_bindings.get(&ident.name) {
+ Some(binding) => Ok((*binding, Flags::empty())),
None => Err(Determinacy::Determined),
},
};
@@ -842,9 +826,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if ns == TypeNS {
if ident.name == kw::Crate || ident.name == kw::DollarCrate {
let module = self.resolve_crate_root(ident);
- let binding = (module, Visibility::Public, module.span, LocalExpnId::ROOT)
- .to_name_binding(self.arenas);
- return Ok(binding);
+ return Ok(self.module_self_bindings[&module]);
} else if ident.name == kw::Super || ident.name == kw::SelfLower {
// FIXME: Implement these with renaming requirements so that e.g.
// `use super;` doesn't work, but `use super as name;` does.
@@ -990,9 +972,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// progress, we have to ignore those potential unresolved invocations from other modules
// and prohibit access to macro-expanded `macro_export` macros instead (unless restricted
// shadowing is enabled, see `macro_expanded_macro_export_errors`).
- let unexpanded_macros = !module.unexpanded_invocations.borrow().is_empty();
if let Some(binding) = binding {
- if !unexpanded_macros || ns == MacroNS || restricted_shadowing {
+ if binding.determined() || ns == MacroNS || restricted_shadowing {
return check_usable(self, binding);
} else {
return Err((Undetermined, Weak::No));
@@ -1009,7 +990,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// Check if one of unexpanded macros can still define the name,
// if it can then our "no resolution" result is not determined and can be invalidated.
- if unexpanded_macros {
+ if !module.unexpanded_invocations.borrow().is_empty() {
return Err((Undetermined, Weak::Yes));
}
@@ -1247,10 +1228,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if let Some(span) = finalize {
self.report_error(
span,
- ResolutionError::GenericParamsFromOuterFunction(
- res,
- has_generic_params,
- ),
+ ResolutionError::GenericParamsFromOuterItem(res, has_generic_params),
);
}
return Res::Err;
@@ -1314,10 +1292,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if let Some(span) = finalize {
self.report_error(
span,
- ResolutionError::GenericParamsFromOuterFunction(
- res,
- has_generic_params,
- ),
+ ResolutionError::GenericParamsFromOuterItem(res, has_generic_params),
);
}
return Res::Err;
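Several hunks above (together with the `builtin_types_bindings`, `builtin_attrs_bindings` and `registered_tool_bindings` fields added in `lib.rs` later in this diff) replace per-lookup allocation of name bindings with tables built once when the resolver is created. A rough sketch of that precompute-then-lookup pattern, with made-up types standing in for the resolver's arenas and `NameBinding`:

use std::collections::HashMap;

// Stand-in for `Res::PrimTy(..)`; the real bindings also carry visibility,
// span, and expansion information.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Res {
    PrimTy(&'static str),
}

struct Prelude {
    builtin_types: HashMap<&'static str, Res>,
}

impl Prelude {
    fn new() -> Self {
        // Built once, up front, instead of allocating on every scope visit.
        let builtin_types = ["bool", "char", "i32", "u32", "str"]
            .iter()
            .map(|&name| (name, Res::PrimTy(name)))
            .collect();
        Prelude { builtin_types }
    }

    // The per-lookup path is now a plain map access returning a shared value.
    fn lookup_builtin_type(&self, name: &str) -> Option<Res> {
        self.builtin_types.get(name).copied()
    }
}

fn main() {
    let prelude = Prelude::new();
    assert_eq!(prelude.lookup_builtin_type("u32"), Some(Res::PrimTy("u32")));
    assert_eq!(prelude.lookup_builtin_type("metre"), None);
}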
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
index a175d9f6c..d271519a8 100644
--- a/compiler/rustc_resolve/src/imports.rs
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -319,10 +319,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// We should replace the `old_binding` with `binding` regardless
// of whether they have the same resolution or not when they are
// imported from the same glob-import statement.
- // However we currently using `Some(old_binding)` for back compact
- // purposes.
- // This case can be removed after once `Undetermined` is prepared
- // for glob-imports.
+ resolution.binding = Some(binding);
} else if res != old_binding.res() {
let binding = if warn_ambiguity {
this.warn_ambiguity(
@@ -805,13 +802,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
let orig_vis = import.vis.take();
- let binding = this.resolve_ident_in_module(
+ let binding = this.maybe_resolve_ident_in_module(
module,
source,
ns,
&import.parent_scope,
- None,
- None,
);
import.vis.set(orig_vis);
source_bindings[ns].set(binding);
@@ -996,9 +991,9 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if !is_prelude
&& let Some(max_vis) = max_vis.get()
&& !max_vis.is_at_least(import.expect_vis(), self.tcx)
- {
- self.lint_buffer.buffer_lint(UNUSED_IMPORTS, id, import.span, fluent::resolve_glob_import_doesnt_reexport);
- }
+ {
+ self.lint_buffer.buffer_lint(UNUSED_IMPORTS, id, import.span, fluent::resolve_glob_import_doesnt_reexport);
+ }
return None;
}
_ => unreachable!(),
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
index c87db96a5..15ec727e4 100644
--- a/compiler/rustc_resolve/src/late.rs
+++ b/compiler/rustc_resolve/src/late.rs
@@ -311,6 +311,10 @@ enum LifetimeRibKind {
/// error on default object bounds (e.g., `Box<dyn Foo>`).
AnonymousReportError,
+ /// Resolves elided lifetimes to `'static`, but gives a warning that this behavior
+ /// is a bug and will be reverted soon.
+ AnonymousWarn(NodeId),
+
/// Signal we cannot find which should be the anonymous lifetime.
ElisionFailure,
@@ -470,7 +474,7 @@ impl<'a> PathSource<'a> {
| DefKind::Enum
| DefKind::Trait
| DefKind::TraitAlias
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::AssocTy
| DefKind::TyParam
| DefKind::OpaqueTy
@@ -509,7 +513,7 @@ impl<'a> PathSource<'a> {
DefKind::Struct
| DefKind::Union
| DefKind::Variant
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::AssocTy,
_,
) | Res::SelfTyParam { .. }
@@ -768,9 +772,10 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
self.r.record_partial_res(ty.id, PartialRes::new(res));
visit::walk_ty(self, ty)
}
- TyKind::ImplTrait(..) => {
+ TyKind::ImplTrait(node_id, _) => {
let candidates = self.lifetime_elision_candidates.take();
visit::walk_ty(self, ty);
+ self.record_lifetime_params_for_impl_trait(*node_id);
self.lifetime_elision_candidates = candidates;
}
TyKind::TraitObject(bounds, ..) => {
@@ -905,8 +910,8 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
&sig.decl.output,
);
- if let Some((async_node_id, span)) = sig.header.asyncness.opt_return_id() {
- this.record_lifetime_params_for_impl_trait(async_node_id, span);
+ if let Some((async_node_id, _)) = sig.header.asyncness.opt_return_id() {
+ this.record_lifetime_params_for_impl_trait(async_node_id);
}
},
);
@@ -947,8 +952,8 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
&declaration.output,
);
- if let Some((async_node_id, span)) = async_node_id {
- this.record_lifetime_params_for_impl_trait(async_node_id, span);
+ if let Some((async_node_id, _)) = async_node_id {
+ this.record_lifetime_params_for_impl_trait(async_node_id);
}
},
);
@@ -1104,6 +1109,7 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
}
},
AssocConstraintKind::Bound { ref bounds } => {
+ self.record_lifetime_params_for_impl_trait(constraint.id);
walk_list!(self, visit_param_bound, bounds, BoundKind::Bound);
}
}
@@ -1148,6 +1154,7 @@ impl<'a: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast,
}
LifetimeRibKind::AnonymousCreateParameter { .. }
| LifetimeRibKind::AnonymousReportError
+ | LifetimeRibKind::AnonymousWarn(_)
| LifetimeRibKind::Elided(_)
| LifetimeRibKind::ElisionFailure
| LifetimeRibKind::ConcreteAnonConst(_)
@@ -1515,6 +1522,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
// lifetime would be illegal.
LifetimeRibKind::Item
| LifetimeRibKind::AnonymousReportError
+ | LifetimeRibKind::AnonymousWarn(_)
| LifetimeRibKind::ElisionFailure => Some(LifetimeUseSet::Many),
// An anonymous lifetime is legal here, and bound to the right
// place, go ahead.
@@ -1576,7 +1584,8 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
| LifetimeRibKind::Elided(_)
| LifetimeRibKind::Generics { .. }
| LifetimeRibKind::ElisionFailure
- | LifetimeRibKind::AnonymousReportError => {}
+ | LifetimeRibKind::AnonymousReportError
+ | LifetimeRibKind::AnonymousWarn(_) => {}
}
}
@@ -1616,6 +1625,23 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
self.record_lifetime_res(lifetime.id, res, elision_candidate);
return;
}
+ LifetimeRibKind::AnonymousWarn(node_id) => {
+ let msg = if elided {
+ "`&` without an explicit lifetime name cannot be used here"
+ } else {
+ "`'_` cannot be used here"
+ };
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
+ lint::builtin::ELIDED_LIFETIMES_IN_ASSOCIATED_CONSTANT,
+ node_id,
+ lifetime.ident.span,
+ msg,
+ lint::BuiltinLintDiagnostics::AssociatedConstElidedLifetime {
+ elided,
+ span: lifetime.ident.span,
+ },
+ );
+ }
LifetimeRibKind::AnonymousReportError => {
let (msg, note) = if elided {
(
@@ -1740,7 +1766,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
Res::Def(DefKind::Struct, def_id)
| Res::Def(DefKind::Union, def_id)
| Res::Def(DefKind::Enum, def_id)
- | Res::Def(DefKind::TyAlias { .. }, def_id)
+ | Res::Def(DefKind::TyAlias, def_id)
| Res::Def(DefKind::Trait, def_id)
if i + 1 == proj_start =>
{
@@ -1811,7 +1837,8 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
//
// impl Foo for std::cell::Ref<u32> // note lack of '_
// async fn foo(_: std::cell::Ref<u32>) { ... }
- LifetimeRibKind::AnonymousCreateParameter { report_in_path: true, .. } => {
+ LifetimeRibKind::AnonymousCreateParameter { report_in_path: true, .. }
+ | LifetimeRibKind::AnonymousWarn(_) => {
let sess = self.r.tcx.sess;
let mut err = rustc_errors::struct_span_err!(
sess,
@@ -2423,7 +2450,11 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
ItemKind::Const(box ast::ConstItem { ref generics, ref ty, ref expr, .. }) => {
self.with_generic_param_rib(
&generics.params,
- RibKind::Item(HasGenericParams::Yes(generics.span)),
+ RibKind::Item(if self.r.tcx.features().generic_const_items {
+ HasGenericParams::Yes(generics.span)
+ } else {
+ HasGenericParams::No
+ }),
LifetimeRibKind::Generics {
binder: item.id,
kind: LifetimeBinderKind::ConstItem,
@@ -2898,7 +2929,6 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
match &item.kind {
AssocItemKind::Const(box ast::ConstItem { generics, ty, expr, .. }) => {
debug!("resolve_implementation AssocItemKind::Const");
-
self.with_generic_param_rib(
&generics.params,
RibKind::AssocItem,
@@ -2908,28 +2938,30 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
kind: LifetimeBinderKind::ConstItem,
},
|this| {
- // If this is a trait impl, ensure the const
- // exists in trait
- this.check_trait_item(
- item.id,
- item.ident,
- &item.kind,
- ValueNS,
- item.span,
- seen_trait_items,
- |i, s, c| ConstNotMemberOfTrait(i, s, c),
- );
+ this.with_lifetime_rib(LifetimeRibKind::AnonymousWarn(item.id), |this| {
+ // If this is a trait impl, ensure the const
+ // exists in trait
+ this.check_trait_item(
+ item.id,
+ item.ident,
+ &item.kind,
+ ValueNS,
+ item.span,
+ seen_trait_items,
+ |i, s, c| ConstNotMemberOfTrait(i, s, c),
+ );
- this.visit_generics(generics);
- this.visit_ty(ty);
- if let Some(expr) = expr {
- // We allow arbitrary const expressions inside of associated consts,
- // even if they are potentially not const evaluatable.
- //
- // Type parameters can already be used and as associated consts are
- // not used as part of the type system, this is far less surprising.
- this.resolve_const_body(expr, None);
- }
+ this.visit_generics(generics);
+ this.visit_ty(ty);
+ if let Some(expr) = expr {
+ // We allow arbitrary const expressions inside of associated consts,
+ // even if they are potentially not const evaluatable.
+ //
+ // Type parameters can already be used, and since associated consts are
+ // not used as part of the type system, this is far less surprising.
+ this.resolve_const_body(expr, None);
+ }
+ });
},
);
}
@@ -4108,6 +4140,12 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
});
}
+ fn resolve_expr_field(&mut self, f: &'ast ExprField, e: &'ast Expr) {
+ self.resolve_expr(&f.expr, Some(e));
+ self.visit_ident(f.ident);
+ walk_list!(self, visit_attribute, f.attrs.iter());
+ }
+
fn resolve_expr(&mut self, expr: &'ast Expr, parent: Option<&'ast Expr>) {
// First, record candidate traits for this expression if it could
// result in the invocation of a method call.
@@ -4123,7 +4161,19 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
ExprKind::Struct(ref se) => {
self.smart_resolve_path(expr.id, &se.qself, &se.path, PathSource::Struct);
- visit::walk_expr(self, expr);
+ // This is the same as `visit::walk_expr(self, expr);`, but we want to pass the
+ // parent in for accurate suggestions when encountering `Foo { bar }` that should
+ // have been `Foo { bar: self.bar }`.
+ if let Some(qself) = &se.qself {
+ self.visit_ty(&qself.ty);
+ }
+ self.visit_path(&se.path, expr.id);
+ walk_list!(self, resolve_expr_field, &se.fields, expr);
+ match &se.rest {
+ StructRest::Base(expr) => self.visit_expr(expr),
+ StructRest::Rest(_span) => {}
+ StructRest::None => {}
+ }
}
ExprKind::Break(Some(label), _) | ExprKind::Continue(Some(label)) => {
@@ -4148,7 +4198,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
self.resolve_expr(e, Some(&expr));
}
- ExprKind::Let(ref pat, ref scrutinee, _) => {
+ ExprKind::Let(ref pat, ref scrutinee, _, _) => {
self.visit_expr(scrutinee);
self.resolve_pattern_top(pat, PatternSource::Let);
}
@@ -4336,7 +4386,7 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
/// We include all lifetime parameters, either named or "Fresh".
/// The order of those parameters does not matter, as long as it is
/// deterministic.
- fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId, span: Span) {
+ fn record_lifetime_params_for_impl_trait(&mut self, impl_trait_node_id: NodeId) {
let mut extra_lifetime_params = vec![];
for rib in self.lifetime_ribs.iter().rev() {
@@ -4349,14 +4399,10 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> {
extra_lifetime_params.extend(earlier_fresh);
}
}
- LifetimeRibKind::Generics { .. } => {}
- _ => {
- // We are in a function definition. We should only find `Generics`
- // and `AnonymousCreateParameter` inside the innermost `Item`.
- span_bug!(span, "unexpected rib kind: {:?}", rib.kind)
- }
+ _ => {}
}
}
+
self.r.extra_lifetime_params_map.insert(impl_trait_node_id, extra_lifetime_params);
}
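`record_lifetime_params_for_impl_trait` above now tolerates arbitrary rib kinds instead of hitting `span_bug!`, while still collecting the fresh lifetimes introduced by `AnonymousCreateParameter` ribs. A toy sketch of that innermost-to-outermost walk over a rib stack; the types and the stop-at-item rule are illustrative only, not rustc's exact behavior:

// Hypothetical, simplified rib kinds; rustc's carry binder ids and spans.
enum RibKind {
    Item,
    AnonymousCreateParameter,
    Generics,
}

struct Rib {
    kind: RibKind,
    fresh_lifetimes: Vec<&'static str>,
}

fn collect_extra_lifetimes(ribs: &[Rib]) -> Vec<&'static str> {
    let mut extra = Vec::new();
    // The innermost rib is last, so walk the stack in reverse.
    for rib in ribs.iter().rev() {
        match rib.kind {
            // Stop at the enclosing item boundary (illustrative).
            RibKind::Item => break,
            // Fresh (elided) lifetimes introduced here must be captured.
            RibKind::AnonymousCreateParameter => {
                extra.extend(rib.fresh_lifetimes.iter().copied());
            }
            // Other rib kinds contribute nothing, mirroring the new `_ => {}`.
            RibKind::Generics => {}
        }
    }
    extra
}

fn main() {
    let ribs = vec![
        Rib { kind: RibKind::Item, fresh_lifetimes: vec!["'outer"] },
        Rib { kind: RibKind::AnonymousCreateParameter, fresh_lifetimes: vec!["'_fresh"] },
        Rib { kind: RibKind::Generics, fresh_lifetimes: vec![] },
    ];
    assert_eq!(collect_extra_lifetimes(&ribs), vec!["'_fresh"]);
}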
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
index c34b7df9b..bc5f8a37b 100644
--- a/compiler/rustc_resolve/src/late/diagnostics.rs
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -41,7 +41,7 @@ type Res = def::Res<ast::NodeId>;
/// A field or associated item from self type suggested in case of resolution failure.
enum AssocSuggestion {
- Field,
+ Field(Span),
MethodWithSelf { called: bool },
AssocFn { called: bool },
AssocType,
@@ -51,7 +51,7 @@ enum AssocSuggestion {
impl AssocSuggestion {
fn action(&self) -> &'static str {
match self {
- AssocSuggestion::Field => "use the available field",
+ AssocSuggestion::Field(_) => "use the available field",
AssocSuggestion::MethodWithSelf { called: true } => {
"call the method with the fully-qualified path"
}
@@ -186,7 +186,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
fallback_label: format!("not a {expected}"),
span,
span_label: match res {
- Res::Def(kind, def_id) if kind == DefKind::TyParam => {
+ Res::Def(DefKind::TyParam, def_id) => {
Some((self.r.def_span(def_id), "found this type parameter"))
}
_ => None,
@@ -214,7 +214,9 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
module: None,
}
} else {
- let item_span = path.last().unwrap().ident.span;
+ let mut span_label = None;
+ let item_ident = path.last().unwrap().ident;
+ let item_span = item_ident.span;
let (mod_prefix, mod_str, module, suggestion) = if path.len() == 1 {
debug!(?self.diagnostic_metadata.current_impl_items);
debug!(?self.diagnostic_metadata.current_function);
@@ -224,32 +226,75 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
&& let FnKind::Fn(_, _, sig, ..) = fn_kind
&& let Some(items) = self.diagnostic_metadata.current_impl_items
&& let Some(item) = items.iter().find(|i| {
- if let AssocItemKind::Fn(..) | AssocItemKind::Const(..) = &i.kind
- && i.ident.name == item_str.name
- // don't suggest if the item is in Fn signature arguments
- // issue #112590
+ i.ident.name == item_str.name
+ // Don't suggest if the item is in Fn signature arguments (#112590).
&& !sig.span.contains(item_span)
- {
- debug!(?item_str.name);
- return true
- }
- false
})
{
- let self_sugg = match &item.kind {
- AssocItemKind::Fn(fn_) if fn_.sig.decl.has_self() => "self.",
- _ => "Self::",
+ let sp = item_span.shrink_to_lo();
+
+ // Account for `Foo { field }` when suggesting `self.field`, so we end up with
+ // `Foo { field: self.field }`.
+ let field = match source {
+ PathSource::Expr(Some(Expr { kind: ExprKind::Struct(expr), .. })) => {
+ expr.fields.iter().find(|f| f.ident == item_ident)
+ }
+ _ => None,
+ };
+ let pre = if let Some(field) = field && field.is_shorthand {
+ format!("{item_ident}: ")
+ } else {
+ String::new()
+ };
+ // Ensure we provide a structured suggestion for an assoc fn only for
+ // expressions that are actually a fn call.
+ let is_call = match field {
+ Some(ast::ExprField { expr, .. }) => {
+ matches!(expr.kind, ExprKind::Call(..))
+ }
+ _ => matches!(
+ source,
+ PathSource::Expr(Some(Expr { kind: ExprKind::Call(..), ..})),
+ ),
};
- Some((
- item_span.shrink_to_lo(),
- match &item.kind {
- AssocItemKind::Fn(..) => "consider using the associated function",
- AssocItemKind::Const(..) => "consider using the associated constant",
- _ => unreachable!("item kind was filtered above"),
- },
- self_sugg.to_string()
- ))
+ match &item.kind {
+ AssocItemKind::Fn(fn_)
+ if (!sig.decl.has_self() || !is_call) && fn_.sig.decl.has_self() => {
+ // Ensure that we only suggest `self.` if `self` is available,
+ // you can't call `fn foo(&self)` from `fn bar()` (#115992).
+ // We also want to mention that the method exists.
+ span_label = Some((
+ item.ident.span,
+ "a method by that name is available on `Self` here",
+ ));
+ None
+ }
+ AssocItemKind::Fn(fn_)
+ if !fn_.sig.decl.has_self() && !is_call => {
+ span_label = Some((
+ item.ident.span,
+ "an associated function by that name is available on `Self` here",
+ ));
+ None
+ }
+ AssocItemKind::Fn(fn_) if fn_.sig.decl.has_self() => Some((
+ sp,
+ "consider using the method on `Self`",
+ format!("{pre}self."),
+ )),
+ AssocItemKind::Fn(_) => Some((
+ sp,
+ "consider using the associated function on `Self`",
+ format!("{pre}Self::"),
+ )),
+ AssocItemKind::Const(..) => Some((
+ sp,
+ "consider using the associated constant on `Self`",
+ format!("{pre}Self::"),
+ )),
+ _ => None
+ }
} else {
None
};
@@ -314,7 +359,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
msg: format!("cannot find {expected} `{item_str}` in {mod_prefix}{mod_str}"),
fallback_label,
span: item_span,
- span_label: None,
+ span_label,
could_be_expr: false,
suggestion,
module,
@@ -611,17 +656,30 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
self.lookup_assoc_candidate(ident, ns, is_expected, source.is_call())
{
let self_is_available = self.self_value_is_available(path[0].ident.span);
+ // Account for `Foo { field }` when suggesting `self.field`, so we end up with
+ // `Foo { field: self.field }`.
+ let pre = match source {
+ PathSource::Expr(Some(Expr { kind: ExprKind::Struct(expr), .. }))
+ if expr
+ .fields
+ .iter()
+ .any(|f| f.ident == path[0].ident && f.is_shorthand) =>
+ {
+ format!("{path_str}: ")
+ }
+ _ => String::new(),
+ };
match candidate {
- AssocSuggestion::Field => {
+ AssocSuggestion::Field(field_span) => {
if self_is_available {
- err.span_suggestion(
- span,
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
"you might have meant to use the available field",
- format!("self.{path_str}"),
+ format!("{pre}self."),
Applicability::MachineApplicable,
);
} else {
- err.span_label(span, "a field by this name exists in `Self`");
+ err.span_label(field_span, "a field by that name exists in `Self`");
}
}
AssocSuggestion::MethodWithSelf { called } if self_is_available => {
@@ -630,10 +688,10 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
} else {
"you might have meant to refer to the method"
};
- err.span_suggestion(
- span,
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
msg,
- format!("self.{path_str}"),
+ "self.".to_string(),
Applicability::MachineApplicable,
);
}
@@ -641,10 +699,10 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
| AssocSuggestion::AssocFn { .. }
| AssocSuggestion::AssocConst
| AssocSuggestion::AssocType => {
- err.span_suggestion(
- span,
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
format!("you might have meant to {}", candidate.action()),
- format!("Self::{path_str}"),
+ "Self::".to_string(),
Applicability::MachineApplicable,
);
}
@@ -1419,7 +1477,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
(Res::Def(DefKind::Macro(MacroKind::Bang), _), _) => {
err.span_label(span, fallback_label.to_string());
}
- (Res::Def(DefKind::TyAlias { .. }, def_id), PathSource::Trait(_)) => {
+ (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => {
err.span_label(span, "type aliases cannot be used as traits");
if self.r.tcx.sess.is_nightly_build() {
let msg = "you might have meant to use `#![feature(trait_alias)]` instead of a \
@@ -1588,7 +1646,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
err.span_label(span, fallback_label.to_string());
err.note("can't use `Self` as a constructor, you must use the implemented struct");
}
- (Res::Def(DefKind::TyAlias { .. } | DefKind::AssocTy, _), _) if ns == ValueNS => {
+ (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => {
err.note("can't use a type alias as a constructor");
}
_ => return false,
@@ -1657,11 +1715,11 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> {
resolution.full_res()
{
if let Some(field_ids) = self.r.field_def_ids(did) {
- if field_ids
+ if let Some(field_id) = field_ids
.iter()
- .any(|&field_id| ident.name == self.r.tcx.item_name(field_id))
+ .find(|&&field_id| ident.name == self.r.tcx.item_name(field_id))
{
- return Some(AssocSuggestion::Field);
+ return Some(AssocSuggestion::Field(self.r.def_span(*field_id)));
}
}
}
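The suggestion changes above switch from replacing the whole path to inserting a short prefix at its start, and they special-case struct-literal shorthand fields so that `Foo { bar }` becomes `Foo { bar: self.bar }` rather than a garbled `Foo { self.bar }`. A small sketch of just the inserted text, using plain strings instead of spans and diagnostics (the helper below is hypothetical):

// Returns what would be inserted immediately before `ident`.
fn prefix_suggestion(ident: &str, is_shorthand_field: bool, has_self_receiver: bool) -> String {
    // For `Foo { bar }` the shorthand must be expanded, so the inserted text
    // also repeats the field name followed by `:`.
    let pre = if is_shorthand_field { format!("{ident}: ") } else { String::new() };
    // Methods with a receiver are reached through `self.`; associated
    // functions and constants through `Self::`.
    let qualifier = if has_self_receiver { "self." } else { "Self::" };
    format!("{pre}{qualifier}")
}

fn main() {
    // `Foo { bar }` -> insert "bar: self." before `bar` -> `Foo { bar: self.bar }`
    assert_eq!(prefix_suggestion("bar", true, true), "bar: self.");
    // A bare path `bar` -> insert "Self::" -> `Self::bar`
    assert_eq!(prefix_suggestion("bar", false, false), "Self::");
}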
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
index 76e54e60d..949c6ab5a 100644
--- a/compiler/rustc_resolve/src/lib.rs
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -18,7 +18,7 @@
#![recursion_limit = "256"]
#![allow(rustdoc::private_intra_doc_links)]
#![allow(rustc::potential_query_instability)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate tracing;
@@ -34,18 +34,20 @@ use rustc_ast::{AngleBracketedArg, Crate, Expr, ExprKind, GenericArg, GenericArg
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::intern::Interned;
use rustc_data_structures::steal::Steal;
-use rustc_data_structures::sync::{Lrc, MappedReadGuard};
+use rustc_data_structures::sync::{FreezeReadGuard, Lrc};
use rustc_errors::{
Applicability, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, SubdiagnosticMessage,
};
use rustc_expand::base::{DeriveResolutions, SyntaxExtension, SyntaxExtensionKind};
+use rustc_feature::BUILTIN_ATTRIBUTES;
use rustc_fluent_macro::fluent_messages;
use rustc_hir::def::Namespace::{self, *};
+use rustc_hir::def::NonMacroAttrKind;
use rustc_hir::def::{self, CtorOf, DefKind, DocLinkResMap, LifetimeRes, PartialRes, PerNS};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap, LocalDefIdSet};
use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE};
use rustc_hir::definitions::DefPathData;
-use rustc_hir::TraitCandidate;
+use rustc_hir::{PrimTy, TraitCandidate};
use rustc_index::IndexVec;
use rustc_metadata::creader::{CStore, CrateLoader};
use rustc_middle::metadata::ModChild;
@@ -184,8 +186,8 @@ struct BindingError {
#[derive(Debug)]
enum ResolutionError<'a> {
- /// Error E0401: can't use type or const parameters from outer function.
- GenericParamsFromOuterFunction(Res, HasGenericParams),
+ /// Error E0401: can't use type or const parameters from outer item.
+ GenericParamsFromOuterItem(Res, HasGenericParams),
/// Error E0403: the name is already used for a type or const parameter in this generic
/// parameter list.
NameAlreadyUsedInParameterList(Symbol, Span),
@@ -517,7 +519,7 @@ struct ModuleData<'a> {
/// All modules are unique and allocated on a same arena,
/// so we can use referential equality to compare them.
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[rustc_pass_by_value]
struct Module<'a>(Interned<'a, ModuleData<'a>>);
@@ -879,14 +881,33 @@ impl<'a> NameBindingData<'a> {
invoc_parent_expansion.is_descendant_of(self_parent_expansion);
!(certainly_before_other_or_simultaneously || certainly_before_invoc_or_simultaneously)
}
+
+ // The purpose of this method is to postpone determining a single binding because
+ // we can't predict whether it will be overwritten by recently expanded macros.
+ // FIXME: How can we integrate this with `update_resolution`?
+ fn determined(&self) -> bool {
+ match &self.kind {
+ NameBindingKind::Import { binding, import, .. } if import.is_glob() => {
+ import.parent_scope.module.unexpanded_invocations.borrow().is_empty()
+ && binding.determined()
+ }
+ _ => true,
+ }
+ }
}
#[derive(Default, Clone)]
struct ExternPreludeEntry<'a> {
- extern_crate_item: Option<NameBinding<'a>>,
+ binding: Option<NameBinding<'a>>,
introduced_by_item: bool,
}
+impl ExternPreludeEntry<'_> {
+ fn is_import(&self) -> bool {
+ self.binding.is_some_and(|binding| binding.is_import())
+ }
+}
+
/// Used for better errors for E0773
enum BuiltinMacroState {
NotYetSeen(SyntaxExtensionKind),
@@ -981,7 +1002,6 @@ pub struct Resolver<'a, 'tcx> {
glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
/// Visibilities in "lowered" form, for all entities that have them.
visibilities: FxHashMap<LocalDefId, ty::Visibility>,
- has_pub_restricted: bool,
used_imports: FxHashSet<NodeId>,
maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
@@ -996,6 +1016,12 @@ pub struct Resolver<'a, 'tcx> {
arenas: &'a ResolverArenas<'a>,
dummy_binding: NameBinding<'a>,
+ builtin_types_bindings: FxHashMap<Symbol, NameBinding<'a>>,
+ builtin_attrs_bindings: FxHashMap<Symbol, NameBinding<'a>>,
+ registered_tool_bindings: FxHashMap<Ident, NameBinding<'a>>,
+ /// Binding for implicitly declared names that come with a module,
+ /// like `self` (not yet used), or `crate`/`$crate` (for root modules).
+ module_self_bindings: FxHashMap<Module<'a>, NameBinding<'a>>,
used_extern_options: FxHashSet<Symbol>,
macro_names: FxHashSet<Ident>,
@@ -1033,7 +1059,7 @@ pub struct Resolver<'a, 'tcx> {
/// `macro_rules` scopes produced by `macro_rules` item definitions.
macro_rules_scopes: FxHashMap<LocalDefId, MacroRulesScopeRef<'a>>,
/// Helper attributes that are in scope for the given expansion.
- helper_attrs: FxHashMap<LocalExpnId, Vec<Ident>>,
+ helper_attrs: FxHashMap<LocalExpnId, Vec<(Ident, NameBinding<'a>)>>,
/// Ready or in-progress results of resolving paths inside the `#[derive(...)]` attribute
/// with the given `ExpnId`.
derive_data: FxHashMap<LocalExpnId, DeriveData>,
@@ -1111,6 +1137,7 @@ impl<'a> ResolverArenas<'a> {
span: Span,
no_implicit_prelude: bool,
module_map: &mut FxHashMap<DefId, Module<'a>>,
+ module_self_bindings: &mut FxHashMap<Module<'a>, NameBinding<'a>>,
) -> Module<'a> {
let module = Module(Interned::new_unchecked(self.modules.alloc(ModuleData::new(
parent,
@@ -1125,6 +1152,9 @@ impl<'a> ResolverArenas<'a> {
}
if let Some(def_id) = def_id {
module_map.insert(def_id, module);
+ let vis = ty::Visibility::<DefId>::Public;
+ let binding = (module, vis, module.span, LocalExpnId::ROOT).to_name_binding(self);
+ module_self_bindings.insert(module, binding);
}
module
}
@@ -1236,6 +1266,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
) -> Resolver<'a, 'tcx> {
let root_def_id = CRATE_DEF_ID.to_def_id();
let mut module_map = FxHashMap::default();
+ let mut module_self_bindings = FxHashMap::default();
let graph_root = arenas.new_module(
None,
ModuleKind::Def(DefKind::Mod, root_def_id, kw::Empty),
@@ -1243,6 +1274,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
crate_span,
attr::contains_name(attrs, sym::no_implicit_prelude),
&mut module_map,
+ &mut module_self_bindings,
);
let empty_module = arenas.new_module(
None,
@@ -1251,6 +1283,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
DUMMY_SP,
true,
&mut FxHashMap::default(),
+ &mut FxHashMap::default(),
);
let mut visibilities = FxHashMap::default();
@@ -1283,6 +1316,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let registered_tools = tcx.registered_tools(());
let features = tcx.features();
+ let pub_vis = ty::Visibility::<DefId>::Public;
let mut resolver = Resolver {
tcx,
@@ -1320,7 +1354,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
glob_map: Default::default(),
visibilities,
- has_pub_restricted: false,
used_imports: FxHashSet::default(),
maybe_unused_trait_imports: Default::default(),
@@ -1330,14 +1363,33 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
macro_expanded_macro_export_errors: BTreeSet::new(),
arenas,
- dummy_binding: arenas.alloc_name_binding(NameBindingData {
- kind: NameBindingKind::Res(Res::Err),
- ambiguity: None,
- warn_ambiguity: false,
- expansion: LocalExpnId::ROOT,
- span: DUMMY_SP,
- vis: ty::Visibility::Public,
- }),
+ dummy_binding: (Res::Err, pub_vis, DUMMY_SP, LocalExpnId::ROOT).to_name_binding(arenas),
+ builtin_types_bindings: PrimTy::ALL
+ .iter()
+ .map(|prim_ty| {
+ let binding = (Res::PrimTy(*prim_ty), pub_vis, DUMMY_SP, LocalExpnId::ROOT)
+ .to_name_binding(arenas);
+ (prim_ty.name(), binding)
+ })
+ .collect(),
+ builtin_attrs_bindings: BUILTIN_ATTRIBUTES
+ .iter()
+ .map(|builtin_attr| {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::Builtin(builtin_attr.name));
+ let binding =
+ (res, pub_vis, DUMMY_SP, LocalExpnId::ROOT).to_name_binding(arenas);
+ (builtin_attr.name, binding)
+ })
+ .collect(),
+ registered_tool_bindings: registered_tools
+ .iter()
+ .map(|ident| {
+ let binding = (Res::ToolMod, pub_vis, ident.span, LocalExpnId::ROOT)
+ .to_name_binding(arenas);
+ (*ident, binding)
+ })
+ .collect(),
+ module_self_bindings,
used_extern_options: Default::default(),
macro_names: FxHashSet::default(),
@@ -1407,7 +1459,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
no_implicit_prelude: bool,
) -> Module<'a> {
let module_map = &mut self.module_map;
- self.arenas.new_module(parent, kind, expn_id, span, no_implicit_prelude, module_map)
+ let module_self_bindings = &mut self.module_self_bindings;
+ self.arenas.new_module(
+ parent,
+ kind,
+ expn_id,
+ span,
+ no_implicit_prelude,
+ module_map,
+ module_self_bindings,
+ )
}
fn next_node_id(&mut self) -> NodeId {
@@ -1436,7 +1497,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect();
let expn_that_defined = self.expn_that_defined;
let visibilities = self.visibilities;
- let has_pub_restricted = self.has_pub_restricted;
let extern_crate_map = self.extern_crate_map;
let maybe_unused_trait_imports = self.maybe_unused_trait_imports;
let glob_map = self.glob_map;
@@ -1454,7 +1514,6 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
let global_ctxt = ResolverGlobalCtxt {
expn_that_defined,
visibilities,
- has_pub_restricted,
effective_visibilities,
extern_crate_map,
module_children: self.module_children,
@@ -1498,7 +1557,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
))
}
- fn cstore(&self) -> MappedReadGuard<'_, CStore> {
+ fn cstore(&self) -> FreezeReadGuard<'_, CStore> {
CStore::from_tcx(self.tcx)
}
@@ -1553,7 +1612,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
});
// Make sure we don't mutate the cstore from here on.
- self.tcx.untracked().cstore.leak();
+ self.tcx.untracked().cstore.freeze();
}
fn traits_in_scope(
@@ -1727,7 +1786,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// but not introduce it, as used if they are accessed from lexical scope.
if is_lexical_scope {
if let Some(entry) = self.extern_prelude.get(&ident.normalize_to_macros_2_0()) {
- if !entry.introduced_by_item && entry.extern_crate_item == Some(used_binding) {
+ if !entry.introduced_by_item && entry.binding == Some(used_binding) {
return;
}
}
@@ -1885,12 +1944,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// Make sure `self`, `super` etc produce an error when passed to here.
return None;
}
- self.extern_prelude.get(&ident.normalize_to_macros_2_0()).cloned().and_then(|entry| {
- if let Some(binding) = entry.extern_crate_item {
- if finalize && entry.introduced_by_item {
- self.record_use(ident, binding, false);
+
+ let norm_ident = ident.normalize_to_macros_2_0();
+ let binding = self.extern_prelude.get(&norm_ident).cloned().and_then(|entry| {
+ Some(if let Some(binding) = entry.binding {
+ if finalize {
+ if !entry.is_import() {
+ self.crate_loader(|c| c.process_path_extern(ident.name, ident.span));
+ } else if entry.introduced_by_item {
+ self.record_use(ident, binding, false);
+ }
}
- Some(binding)
+ binding
} else {
let crate_id = if finalize {
let Some(crate_id) =
@@ -1903,10 +1968,16 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
self.crate_loader(|c| c.maybe_process_path_extern(ident.name))?
};
let crate_root = self.expect_module(crate_id.as_def_id());
- let vis = ty::Visibility::<LocalDefId>::Public;
- Some((crate_root, vis, DUMMY_SP, LocalExpnId::ROOT).to_name_binding(self.arenas))
- }
- })
+ let vis = ty::Visibility::<DefId>::Public;
+ (crate_root, vis, DUMMY_SP, LocalExpnId::ROOT).to_name_binding(self.arenas)
+ })
+ });
+
+ if let Some(entry) = self.extern_prelude.get_mut(&norm_ident) {
+ entry.binding = binding;
+ }
+
+ binding
}
/// Rustdoc uses this to resolve doc link paths in a recoverable way. `PathResult<'a>`
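One behavioral detail of the reworked `extern_prelude_get` above is that a binding computed for a crate passed via `--extern` is now written back into the `ExternPreludeEntry`, so later lookups reuse it. A toy sketch of that write-back caching, with a `u32` standing in for `NameBinding<'a>` and all names invented for the example:

use std::collections::HashMap;

#[derive(Default)]
struct ExternPreludeEntry {
    binding: Option<u32>, // `u32` stands in for `NameBinding<'a>`
}

#[derive(Default)]
struct Resolver {
    extern_prelude: HashMap<String, ExternPreludeEntry>,
    crate_loads: usize, // counts how often the expensive path runs
}

impl Resolver {
    fn extern_prelude_get(&mut self, name: &str) -> Option<u32> {
        let binding = match self.extern_prelude.get(name)?.binding {
            Some(binding) => binding,
            None => {
                // Expensive path: pretend to load the crate root and build a binding.
                self.crate_loads += 1;
                42
            }
        };
        // Write the result back into the entry so later lookups reuse it,
        // mirroring the `get_mut` at the end of the real function.
        if let Some(entry) = self.extern_prelude.get_mut(name) {
            entry.binding = Some(binding);
        }
        Some(binding)
    }
}

fn main() {
    let mut resolver = Resolver::default();
    resolver.extern_prelude.insert("serde".to_string(), ExternPreludeEntry::default());
    assert_eq!(resolver.extern_prelude_get("serde"), Some(42));
    assert_eq!(resolver.extern_prelude_get("serde"), Some(42));
    assert_eq!(resolver.crate_loads, 1); // the second lookup hit the cached binding
    assert_eq!(resolver.extern_prelude_get("rand"), None); // not in the prelude at all
}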
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
index 6a5b675b4..90ae08ce3 100644
--- a/compiler/rustc_resolve/src/macros.rs
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -2,12 +2,13 @@
//! interface provided by `Resolver` to macro expander.
use crate::errors::{
- self, AddAsNonDerive, CannotFindIdentInThisScope, MacroExpectedFound, RemoveSurroundingDerive,
+ self, AddAsNonDerive, CannotDetermineMacroResolution, CannotFindIdentInThisScope,
+ MacroExpectedFound, RemoveSurroundingDerive,
};
use crate::Namespace::*;
use crate::{BuiltinMacroState, Determinacy};
use crate::{DeriveData, Finalize, ParentScope, ResolutionError, Resolver, ScopeSet};
-use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment};
+use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment, ToNameBinding};
use rustc_ast::expand::StrippedCfgItem;
use rustc_ast::{self as ast, attr, Crate, Inline, ItemKind, ModKind, NodeId};
use rustc_ast_pretty::pprust;
@@ -20,12 +21,12 @@ use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
use rustc_expand::compile_declarative_macro;
use rustc_expand::expand::{AstFragment, Invocation, InvocationKind, SupportsMacroExpansion};
use rustc_hir::def::{self, DefKind, NonMacroAttrKind};
-use rustc_hir::def_id::{CrateNum, LocalDefId};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_middle::middle::stability;
use rustc_middle::ty::RegisteredTools;
-use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::{TyCtxt, Visibility};
use rustc_session::lint::builtin::{
- LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE, UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+ LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE, UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
};
use rustc_session::lint::builtin::{UNUSED_MACROS, UNUSED_MACRO_RULES};
use rustc_session::lint::BuiltinLintDiagnostics;
@@ -401,8 +402,17 @@ impl<'a, 'tcx> ResolverExpand for Resolver<'a, 'tcx> {
}
// Sort helpers in a stable way independent from the derive resolution order.
entry.helper_attrs.sort_by_key(|(i, _)| *i);
- self.helper_attrs
- .insert(expn_id, entry.helper_attrs.iter().map(|(_, ident)| *ident).collect());
+ let helper_attrs = entry
+ .helper_attrs
+ .iter()
+ .map(|(_, ident)| {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
+ let binding = (res, Visibility::<DefId>::Public, ident.span, expn_id)
+ .to_name_binding(self.arenas);
+ (*ident, binding)
+ })
+ .collect();
+ self.helper_attrs.insert(expn_id, helper_attrs);
// Mark this derive as having `Copy` either if it has `Copy` itself or if its parent derive
// has `Copy`, to support cases like `#[derive(Clone, Copy)] #[derive(Debug)]`.
if entry.has_derive_copy || self.has_derive_copy(parent_scope.expansion) {
@@ -600,9 +610,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
if res == Res::NonMacroAttr(NonMacroAttrKind::Tool)
&& path.segments.len() >= 2
&& path.segments[0].ident.name == sym::diagnostic
+ && path.segments[1].ident.name != sym::on_unimplemented
{
self.tcx.sess.parse_sess.buffer_lint(
- UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
path.segments[1].span(),
node_id,
"unknown diagnostic attribute",
@@ -710,13 +721,11 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> {
// even if speculative `resolve_path` returned nothing previously, so we skip this
// less informative error if the privacy error is reported elsewhere.
if this.privacy_errors.is_empty() {
- let msg = format!(
- "cannot determine resolution for the {} `{}`",
- kind.descr(),
- Segment::names_to_string(path)
- );
- let msg_note = "import resolution is stuck, try simplifying macro imports";
- this.tcx.sess.struct_span_err(span, msg).note(msg_note).emit();
+ this.tcx.sess.emit_err(CannotDetermineMacroResolution {
+ span,
+ kind: kind.descr(),
+ path: Segment::names_to_string(path),
+ });
}
}
};
diff --git a/compiler/rustc_resolve/src/rustdoc.rs b/compiler/rustc_resolve/src/rustdoc.rs
index ba7417b6d..7c41c32d0 100644
--- a/compiler/rustc_resolve/src/rustdoc.rs
+++ b/compiler/rustc_resolve/src/rustdoc.rs
@@ -2,9 +2,11 @@ use pulldown_cmark::{BrokenLink, CowStr, Event, LinkType, Options, Parser, Tag};
use rustc_ast as ast;
use rustc_ast::util::comments::beautify_doc_string;
use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;
use rustc_span::symbol::{kw, sym, Symbol};
-use rustc_span::Span;
+use rustc_span::{InnerSpan, Span, DUMMY_SP};
+use std::ops::Range;
use std::{cmp, mem};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -462,3 +464,88 @@ fn collect_link_data<'input, 'callback>(
display_text.map(String::into_boxed_str)
}
+
+/// Returns a span encompassing all the document fragments.
+pub fn span_of_fragments(fragments: &[DocFragment]) -> Option<Span> {
+ if fragments.is_empty() {
+ return None;
+ }
+ let start = fragments[0].span;
+ if start == DUMMY_SP {
+ return None;
+ }
+ let end = fragments.last().expect("no doc strings provided").span;
+ Some(start.to(end))
+}
+
+/// Attempts to match a range of bytes from parsed markdown to a `Span` in the source code.
+///
+/// This method will return `None` if we cannot construct a span from the source map or if the
+/// fragments are not all sugared doc comments. It's difficult to calculate the correct span in
+/// that case due to escaping and other source features.
+pub fn source_span_for_markdown_range(
+ tcx: TyCtxt<'_>,
+ markdown: &str,
+ md_range: &Range<usize>,
+ fragments: &[DocFragment],
+) -> Option<Span> {
+ let is_all_sugared_doc = fragments.iter().all(|frag| frag.kind == DocFragmentKind::SugaredDoc);
+
+ if !is_all_sugared_doc {
+ return None;
+ }
+
+ let snippet = tcx.sess.source_map().span_to_snippet(span_of_fragments(fragments)?).ok()?;
+
+ let starting_line = markdown[..md_range.start].matches('\n').count();
+ let ending_line = starting_line + markdown[md_range.start..md_range.end].matches('\n').count();
+
+ // We use `split_terminator('\n')` instead of `lines()` when counting bytes so that we treat
+ // CRLF and LF line endings the same way.
+ let mut src_lines = snippet.split_terminator('\n');
+ let md_lines = markdown.split_terminator('\n');
+
+ // The number of bytes from the source span to the markdown span that are not part
+ // of the markdown, like comment markers.
+ let mut start_bytes = 0;
+ let mut end_bytes = 0;
+
+ 'outer: for (line_no, md_line) in md_lines.enumerate() {
+ loop {
+ let source_line = src_lines.next()?;
+ match source_line.find(md_line) {
+ Some(offset) => {
+ if line_no == starting_line {
+ start_bytes += offset;
+
+ if starting_line == ending_line {
+ break 'outer;
+ }
+ } else if line_no == ending_line {
+ end_bytes += offset;
+ break 'outer;
+ } else if line_no < starting_line {
+ start_bytes += source_line.len() - md_line.len();
+ } else {
+ end_bytes += source_line.len() - md_line.len();
+ }
+ break;
+ }
+ None => {
+ // Since this is a source line that doesn't include a markdown line,
+ // we have to count the newline that we split from earlier.
+ if line_no <= starting_line {
+ start_bytes += source_line.len() + 1;
+ } else {
+ end_bytes += source_line.len() + 1;
+ }
+ }
+ }
+ }
+ }
+
+ Some(span_of_fragments(fragments)?.from_inner(InnerSpan::new(
+ md_range.start + start_bytes,
+ md_range.end + start_bytes + end_bytes,
+ )))
+}
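The new `source_span_for_markdown_range` works by lining up markdown lines with the corresponding source lines and counting the bytes (doc-comment markers, indentation) that exist only in the source. A minimal sketch of that core mapping under the same assumption the function makes, namely that every fragment is a sugared `///` comment whose markdown line appears verbatim in its source line (the helper below is illustrative, not the rustc function):

// Maps a byte position in the concatenated markdown to a byte position in
// the source snippet, or `None` if a line cannot be matched.
fn markdown_pos_to_source_pos(source: &str, markdown: &str, md_pos: usize) -> Option<usize> {
    // Which markdown line the position falls on.
    let starting_line = markdown[..md_pos].matches('\n').count();
    let mut src_lines = source.split_terminator('\n');
    let mut extra_bytes = 0;
    for (line_no, md_line) in markdown.split_terminator('\n').enumerate() {
        let source_line = src_lines.next()?;
        // The markdown line must appear verbatim in the source line; the
        // leading offset is whatever the `/// ` marker contributes.
        let offset = source_line.find(md_line)?;
        if line_no == starting_line {
            return Some(md_pos + extra_bytes + offset);
        }
        // Newlines exist in both strings, so only the marker bytes are extra.
        extra_bytes += source_line.len() - md_line.len();
    }
    None
}

fn main() {
    let source = "/// first line\n/// second line";
    let markdown = "first line\nsecond line";
    // "second" starts at byte 11 in the markdown and byte 19 in the source
    // (11 plus two "/// " markers of 4 bytes each).
    assert_eq!(markdown_pos_to_source_pos(source, markdown, 11), Some(19));
    assert_eq!(&source[19..25], "second");
}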
diff --git a/compiler/rustc_serialize/src/leb128.rs b/compiler/rustc_serialize/src/leb128.rs
index e568b9e67..ca661bac7 100644
--- a/compiler/rustc_serialize/src/leb128.rs
+++ b/compiler/rustc_serialize/src/leb128.rs
@@ -15,23 +15,20 @@ pub const fn largest_max_leb128_len() -> usize {
macro_rules! impl_write_unsigned_leb128 {
($fn_name:ident, $int_ty:ty) => {
#[inline]
- pub fn $fn_name(
- out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len::<$int_ty>()],
- mut value: $int_ty,
- ) -> &[u8] {
+ pub fn $fn_name(out: &mut [u8; max_leb128_len::<$int_ty>()], mut value: $int_ty) -> usize {
let mut i = 0;
loop {
if value < 0x80 {
unsafe {
- *out.get_unchecked_mut(i).as_mut_ptr() = value as u8;
+ *out.get_unchecked_mut(i) = value as u8;
}
i += 1;
break;
} else {
unsafe {
- *out.get_unchecked_mut(i).as_mut_ptr() = ((value & 0x7f) | 0x80) as u8;
+ *out.get_unchecked_mut(i) = ((value & 0x7f) | 0x80) as u8;
}
value >>= 7;
@@ -39,7 +36,7 @@ macro_rules! impl_write_unsigned_leb128 {
}
}
- unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
+ i
}
};
}
@@ -87,10 +84,7 @@ impl_read_unsigned_leb128!(read_usize_leb128, usize);
macro_rules! impl_write_signed_leb128 {
($fn_name:ident, $int_ty:ty) => {
#[inline]
- pub fn $fn_name(
- out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len::<$int_ty>()],
- mut value: $int_ty,
- ) -> &[u8] {
+ pub fn $fn_name(out: &mut [u8; max_leb128_len::<$int_ty>()], mut value: $int_ty) -> usize {
let mut i = 0;
loop {
@@ -104,7 +98,7 @@ macro_rules! impl_write_signed_leb128 {
}
unsafe {
- *out.get_unchecked_mut(i).as_mut_ptr() = byte;
+ *out.get_unchecked_mut(i) = byte;
}
i += 1;
@@ -114,7 +108,7 @@ macro_rules! impl_write_signed_leb128 {
}
}
- unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
+ i
}
};
}
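The rewritten macros above fill a caller-provided `[u8; max_leb128_len::<T>()]` buffer and return how many bytes were used, instead of returning a slice into `MaybeUninit` storage. A standalone sketch of the same unsigned LEB128 scheme for `u32` only, using safe indexing and invented names:

// LEB128 emits the value 7 bits at a time, low bits first, setting the high
// bit of every byte except the last.
const MAX_LEB128_LEN_U32: usize = (32 + 6) / 7; // ceil(32 / 7) = 5 bytes

fn write_u32_leb128(out: &mut [u8; MAX_LEB128_LEN_U32], mut value: u32) -> usize {
    let mut i = 0;
    loop {
        if value < 0x80 {
            // Last byte: high bit clear.
            out[i] = value as u8;
            i += 1;
            break;
        } else {
            // More bytes follow: emit low 7 bits with the continuation bit set.
            out[i] = ((value & 0x7f) | 0x80) as u8;
            value >>= 7;
            i += 1;
        }
    }
    // Number of bytes written, matching the new `-> usize` signature.
    i
}

fn main() {
    let mut buf = [0u8; MAX_LEB128_LEN_U32];
    let n = write_u32_leb128(&mut buf, 300);
    // 300 = 0b1_0010_1100 -> low 7 bits 0x2C with continuation bit, then 0x02.
    assert_eq!(&buf[..n], &[0xAC, 0x02]);
    let n = write_u32_leb128(&mut buf, 5);
    assert_eq!(&buf[..n], &[0x05]);
}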
diff --git a/compiler/rustc_serialize/src/lib.rs b/compiler/rustc_serialize/src/lib.rs
index ce8503918..dd40b3cf0 100644
--- a/compiler/rustc_serialize/src/lib.rs
+++ b/compiler/rustc_serialize/src/lib.rs
@@ -17,6 +17,9 @@ Core encoding and decoding interfaces.
#![feature(new_uninit)]
#![feature(allocator_api)]
#![feature(ptr_sub_ptr)]
+#![feature(slice_first_last_chunk)]
+#![feature(inline_const)]
+#![feature(const_option)]
#![cfg_attr(test, feature(test))]
#![allow(rustc::internal)]
#![deny(rustc::untranslatable_diagnostic)]
diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs
index 0ffc537ee..552554390 100644
--- a/compiler/rustc_serialize/src/opaque.rs
+++ b/compiler/rustc_serialize/src/opaque.rs
@@ -3,10 +3,8 @@ use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
use std::fs::File;
use std::io::{self, Write};
use std::marker::PhantomData;
-use std::mem::MaybeUninit;
use std::ops::Range;
use std::path::Path;
-use std::ptr;
// -----------------------------------------------------------------------------
// Encoder
@@ -24,10 +22,12 @@ const BUF_SIZE: usize = 8192;
/// size of the buffer, rather than the full length of the encoded data, and
/// because it doesn't need to reallocate memory along the way.
pub struct FileEncoder {
- /// The input buffer. For adequate performance, we need more control over
- /// buffering than `BufWriter` offers. If `BufWriter` ever offers a raw
- /// buffer access API, we can use it, and remove `buf` and `buffered`.
- buf: Box<[MaybeUninit<u8>]>,
+ // The input buffer. For adequate performance, we need to be able to write
+ // directly to the unwritten region of the buffer, without calling copy_from_slice.
+ // Note that our buffer is always initialized so that we can do that direct access
+ // without unsafe code. Users of this type write many more than BUF_SIZE bytes, so the
+ // initialization is approximately free.
+ buf: Box<[u8; BUF_SIZE]>,
buffered: usize,
flushed: usize,
file: File,
@@ -38,12 +38,13 @@ pub struct FileEncoder {
impl FileEncoder {
pub fn new<P: AsRef<Path>>(path: P) -> io::Result<Self> {
- // Create the file for reading and writing, because some encoders do both
- // (e.g. the metadata encoder when -Zmeta-stats is enabled)
+ // File::create opens the file for writing only. When -Zmeta-stats is enabled, the metadata
+ // encoder rewinds the file to inspect what was written. So we need to always open the file
+ // for reading and writing.
let file = File::options().read(true).write(true).create(true).truncate(true).open(path)?;
Ok(FileEncoder {
- buf: Box::new_uninit_slice(BUF_SIZE),
+ buf: vec![0u8; BUF_SIZE].into_boxed_slice().try_into().unwrap(),
buffered: 0,
flushed: 0,
file,
@@ -54,94 +55,19 @@ impl FileEncoder {
#[inline]
pub fn position(&self) -> usize {
// Tracking position this way instead of having a `self.position` field
- // means that we don't have to update the position on every write call.
+ // means that we only need to update `self.buffered` on a write call,
+ // as opposed to updating `self.position` and `self.buffered`.
self.flushed + self.buffered
}
+ #[cold]
+ #[inline(never)]
pub fn flush(&mut self) {
- // This is basically a copy of `BufWriter::flush`. If `BufWriter` ever
- // offers a raw buffer access API, we can use it, and remove this.
-
- /// Helper struct to ensure the buffer is updated after all the writes
- /// are complete. It tracks the number of written bytes and drains them
- /// all from the front of the buffer when dropped.
- struct BufGuard<'a> {
- buffer: &'a mut [u8],
- encoder_buffered: &'a mut usize,
- encoder_flushed: &'a mut usize,
- flushed: usize,
- }
-
- impl<'a> BufGuard<'a> {
- fn new(
- buffer: &'a mut [u8],
- encoder_buffered: &'a mut usize,
- encoder_flushed: &'a mut usize,
- ) -> Self {
- assert_eq!(buffer.len(), *encoder_buffered);
- Self { buffer, encoder_buffered, encoder_flushed, flushed: 0 }
- }
-
- /// The unwritten part of the buffer
- fn remaining(&self) -> &[u8] {
- &self.buffer[self.flushed..]
- }
-
- /// Flag some bytes as removed from the front of the buffer
- fn consume(&mut self, amt: usize) {
- self.flushed += amt;
- }
-
- /// true if all of the bytes have been written
- fn done(&self) -> bool {
- self.flushed >= *self.encoder_buffered
- }
- }
-
- impl Drop for BufGuard<'_> {
- fn drop(&mut self) {
- if self.flushed > 0 {
- if self.done() {
- *self.encoder_flushed += *self.encoder_buffered;
- *self.encoder_buffered = 0;
- } else {
- self.buffer.copy_within(self.flushed.., 0);
- *self.encoder_flushed += self.flushed;
- *self.encoder_buffered -= self.flushed;
- }
- }
- }
- }
-
- // If we've already had an error, do nothing. It'll get reported after
- // `finish` is called.
- if self.res.is_err() {
- return;
- }
-
- let mut guard = BufGuard::new(
- unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[..self.buffered]) },
- &mut self.buffered,
- &mut self.flushed,
- );
-
- while !guard.done() {
- match self.file.write(guard.remaining()) {
- Ok(0) => {
- self.res = Err(io::Error::new(
- io::ErrorKind::WriteZero,
- "failed to write the buffered data",
- ));
- return;
- }
- Ok(n) => guard.consume(n),
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
- Err(e) => {
- self.res = Err(e);
- return;
- }
- }
+ if self.res.is_ok() {
+ self.res = self.file.write_all(&self.buf[..self.buffered]);
}
+ self.flushed += self.buffered;
+ self.buffered = 0;
}
pub fn file(&self) -> &File {
@@ -149,91 +75,89 @@ impl FileEncoder {
}
#[inline]
- fn write_one(&mut self, value: u8) {
- let mut buffered = self.buffered;
-
- if std::intrinsics::unlikely(buffered + 1 > BUF_SIZE) {
- self.flush();
- buffered = 0;
- }
+ fn buffer_empty(&mut self) -> &mut [u8] {
+ // SAFETY: self.buffered is inbounds as an invariant of the type
+ unsafe { self.buf.get_unchecked_mut(self.buffered..) }
+ }
- // SAFETY: The above check and `flush` ensures that there is enough
- // room to write the input to the buffer.
- unsafe {
- *MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered) = value;
+ #[cold]
+ #[inline(never)]
+ fn write_all_cold_path(&mut self, buf: &[u8]) {
+ self.flush();
+ if let Some(dest) = self.buf.get_mut(..buf.len()) {
+ dest.copy_from_slice(buf);
+ self.buffered += buf.len();
+ } else {
+ if self.res.is_ok() {
+ self.res = self.file.write_all(buf);
+ }
+ self.flushed += buf.len();
}
-
- self.buffered = buffered + 1;
}
#[inline]
fn write_all(&mut self, buf: &[u8]) {
- let buf_len = buf.len();
-
- if std::intrinsics::likely(buf_len <= BUF_SIZE) {
- let mut buffered = self.buffered;
-
- if std::intrinsics::unlikely(buffered + buf_len > BUF_SIZE) {
- self.flush();
- buffered = 0;
- }
-
- // SAFETY: The above check and `flush` ensures that there is enough
- // room to write the input to the buffer.
- unsafe {
- let src = buf.as_ptr();
- let dst = MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered);
- ptr::copy_nonoverlapping(src, dst, buf_len);
- }
-
- self.buffered = buffered + buf_len;
+ if let Some(dest) = self.buffer_empty().get_mut(..buf.len()) {
+ dest.copy_from_slice(buf);
+ self.buffered += buf.len();
} else {
- self.write_all_unbuffered(buf);
+ self.write_all_cold_path(buf);
}
}
- fn write_all_unbuffered(&mut self, mut buf: &[u8]) {
- // If we've already had an error, do nothing. It'll get reported after
- // `finish` is called.
- if self.res.is_err() {
- return;
- }
-
- if self.buffered > 0 {
+ /// Write up to `N` bytes to this encoder.
+ ///
+ /// This function can be used to avoid the overhead of calling memcpy for writes that
+ /// have runtime-variable length, but are small and have a small fixed upper bound.
+ ///
+ /// This can be used to do in-place encoding as is done for leb128 (without this function
+ /// we would need to write to a temporary buffer then memcpy into the encoder), and it can
+ /// also be used to implement the varint scheme we use for rmeta and dep graph encoding,
+ /// where we only want to encode the first few bytes of an integer. Copying in the whole
+ /// integer then only advancing the encoder state for the few bytes we care about is more
+ /// efficient than calling [`FileEncoder::write_all`], because variable-size copies are
+ /// always lowered to `memcpy`, which has overhead and contains a lot of logic we can bypass
+ /// with this function. Note that common architectures support fixed-size writes up to 8 bytes
+ /// with one instruction, so while this does in some sense do wasted work, we come out ahead.
+ #[inline]
+ pub fn write_with<const N: usize>(&mut self, visitor: impl FnOnce(&mut [u8; N]) -> usize) {
+ let flush_threshold = const { BUF_SIZE.checked_sub(N).unwrap() };
+ if std::intrinsics::unlikely(self.buffered > flush_threshold) {
self.flush();
}
-
- // This is basically a copy of `Write::write_all` but also updates our
- // `self.flushed`. It's necessary because `Write::write_all` does not
- // return the number of bytes written when an error is encountered, and
- // without that, we cannot accurately update `self.flushed` on error.
- while !buf.is_empty() {
- match self.file.write(buf) {
- Ok(0) => {
- self.res = Err(io::Error::new(
- io::ErrorKind::WriteZero,
- "failed to write whole buffer",
- ));
- return;
- }
- Ok(n) => {
- buf = &buf[n..];
- self.flushed += n;
- }
- Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
- Err(e) => {
- self.res = Err(e);
- return;
- }
- }
+ // SAFETY: We checked above that N <= self.buffer_empty().len(), and if it
+ // wasn't, the flush above ensures that our empty buffer is now BUF_SIZE bytes.
+ // We produce a post-mono error if N > BUF_SIZE.
+ let buf = unsafe { self.buffer_empty().first_chunk_mut::<N>().unwrap_unchecked() };
+ let written = visitor(buf);
+ // We have to ensure that an errant visitor cannot cause self.buffered to exceed BUF_SIZE.
+ if written > N {
+ Self::panic_invalid_write::<N>(written);
}
+ self.buffered += written;
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn panic_invalid_write<const N: usize>(written: usize) {
+ panic!("FileEncoder::write_with::<{N}> cannot be used to write {written} bytes");
+ }
+
+ /// Helper for calls where [`FileEncoder::write_with`] always writes the whole array.
+ #[inline]
+ pub fn write_array<const N: usize>(&mut self, buf: [u8; N]) {
+ self.write_with(|dest| {
+ *dest = buf;
+ N
+ })
}
pub fn finish(mut self) -> Result<usize, io::Error> {
self.flush();
-
- let res = std::mem::replace(&mut self.res, Ok(()));
- res.map(|()| self.position())
+ match std::mem::replace(&mut self.res, Ok(())) {
+ Ok(()) => Ok(self.position()),
+ Err(e) => Err(e),
+ }
}
}
@@ -241,7 +165,7 @@ impl Drop for FileEncoder {
fn drop(&mut self) {
// Likely to be a no-op, because `finish` should have been called and
// it also flushes. But do it just in case.
- let _result = self.flush();
+ self.flush();
}
}
@@ -249,26 +173,7 @@ macro_rules! write_leb128 {
($this_fn:ident, $int_ty:ty, $write_leb_fn:ident) => {
#[inline]
fn $this_fn(&mut self, v: $int_ty) {
- const MAX_ENCODED_LEN: usize = $crate::leb128::max_leb128_len::<$int_ty>();
-
- let mut buffered = self.buffered;
-
- // This can't overflow because BUF_SIZE and MAX_ENCODED_LEN are both
- // quite small.
- if std::intrinsics::unlikely(buffered + MAX_ENCODED_LEN > BUF_SIZE) {
- self.flush();
- buffered = 0;
- }
-
- // SAFETY: The above check and flush ensures that there is enough
- // room to write the encoded value to the buffer.
- let buf = unsafe {
- &mut *(self.buf.as_mut_ptr().add(buffered)
- as *mut [MaybeUninit<u8>; MAX_ENCODED_LEN])
- };
-
- let encoded = leb128::$write_leb_fn(buf, v);
- self.buffered = buffered + encoded.len();
+ self.write_with(|buf| leb128::$write_leb_fn(buf, v))
}
};
}
@@ -281,12 +186,12 @@ impl Encoder for FileEncoder {
#[inline]
fn emit_u16(&mut self, v: u16) {
- self.write_all(&v.to_le_bytes());
+ self.write_array(v.to_le_bytes());
}
#[inline]
fn emit_u8(&mut self, v: u8) {
- self.write_one(v);
+ self.write_array([v]);
}
write_leb128!(emit_isize, isize, write_isize_leb128);
@@ -296,7 +201,7 @@ impl Encoder for FileEncoder {
#[inline]
fn emit_i16(&mut self, v: i16) {
- self.write_all(&v.to_le_bytes());
+ self.write_array(v.to_le_bytes());
}
#[inline]
@@ -353,7 +258,7 @@ impl<'a> MemDecoder<'a> {
}
#[inline]
- fn read_array<const N: usize>(&mut self) -> [u8; N] {
+ pub fn read_array<const N: usize>(&mut self) -> [u8; N] {
self.read_raw_bytes(N).try_into().unwrap()
}
@@ -495,7 +400,7 @@ impl Encodable<FileEncoder> for IntEncodedWithFixedSize {
#[inline]
fn encode(&self, e: &mut FileEncoder) {
let _start_pos = e.position();
- e.emit_raw_bytes(&self.0.to_le_bytes());
+ e.write_array(self.0.to_le_bytes());
let _end_pos = e.position();
debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
}
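
The reworked `FileEncoder` above replaces the old `MaybeUninit`-and-raw-pointer buffer handling with two safe building blocks: `write_with`, which hands the caller a fixed-size `&mut [u8; N]` window at the tail of the buffer and advances `buffered` by the returned length, and `write_array`, which always writes the whole array. A minimal standalone sketch of the same pattern follows; it is an illustration only (a `Vec` stands in for the `File`, there is no I/O error tracking, and the names are made up), not the rustc implementation.

    // Minimal sketch of the write_with/write_array pattern used by FileEncoder.
    const BUF_SIZE: usize = 16;

    struct SketchEncoder {
        buf: [u8; BUF_SIZE],
        buffered: usize,
        sink: Vec<u8>, // stands in for the File
    }

    impl SketchEncoder {
        fn new() -> Self {
            SketchEncoder { buf: [0; BUF_SIZE], buffered: 0, sink: Vec::new() }
        }

        fn flush(&mut self) {
            self.sink.extend_from_slice(&self.buf[..self.buffered]);
            self.buffered = 0;
        }

        /// Hand the caller an N-byte window at the end of the buffer; the closure
        /// returns how many of those bytes it actually filled in.
        fn write_with<const N: usize>(&mut self, visitor: impl FnOnce(&mut [u8; N]) -> usize) {
            assert!(N <= BUF_SIZE);
            if self.buffered > BUF_SIZE - N {
                self.flush();
            }
            let window: &mut [u8; N] =
                (&mut self.buf[self.buffered..self.buffered + N]).try_into().unwrap();
            let written = visitor(window);
            assert!(written <= N);
            self.buffered += written;
        }

        fn write_array<const N: usize>(&mut self, bytes: [u8; N]) {
            self.write_with(|dest: &mut [u8; N]| {
                *dest = bytes;
                N
            })
        }
    }

    fn main() {
        let mut enc = SketchEncoder::new();
        // Fixed-size write: both bytes of a u16, little-endian.
        enc.write_array(0xBEEFu16.to_le_bytes());
        // Variable-size write: a toy LEB128-style encoding of a small integer,
        // filling at most 5 bytes but reporting how many were actually used.
        enc.write_with(|buf: &mut [u8; 5]| {
            let mut v = 300u32;
            let mut i = 0;
            loop {
                let byte = (v & 0x7f) as u8;
                v >>= 7;
                buf[i] = if v == 0 { byte } else { byte | 0x80 };
                i += 1;
                if v == 0 {
                    break;
                }
            }
            i
        });
        enc.flush();
        assert_eq!(enc.sink, vec![0xEF, 0xBE, 0xAC, 0x02]);
    }
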
diff --git a/compiler/rustc_serialize/tests/leb128.rs b/compiler/rustc_serialize/tests/leb128.rs
index 7872e7784..dc9b32a96 100644
--- a/compiler/rustc_serialize/tests/leb128.rs
+++ b/compiler/rustc_serialize/tests/leb128.rs
@@ -1,8 +1,4 @@
-#![feature(maybe_uninit_slice)]
-#![feature(maybe_uninit_uninit_array)]
-
use rustc_serialize::leb128::*;
-use std::mem::MaybeUninit;
use rustc_serialize::Decoder;
macro_rules! impl_test_unsigned_leb128 {
@@ -24,9 +20,10 @@ macro_rules! impl_test_unsigned_leb128 {
let mut stream = Vec::new();
+ let mut buf = Default::default();
for &x in &values {
- let mut buf = MaybeUninit::uninit_array();
- stream.extend($write_fn_name(&mut buf, x));
+ let n = $write_fn_name(&mut buf, x);
+ stream.extend(&buf[..n]);
}
let mut decoder = rustc_serialize::opaque::MemDecoder::new(&stream, 0);
@@ -70,9 +67,10 @@ macro_rules! impl_test_signed_leb128 {
let mut stream = Vec::new();
+ let mut buf = Default::default();
for &x in &values {
- let mut buf = MaybeUninit::uninit_array();
- stream.extend($write_fn_name(&mut buf, x));
+ let n = $write_fn_name(&mut buf, x);
+ stream.extend(&buf[..n]);
}
let mut decoder = rustc_serialize::opaque::MemDecoder::new(&stream, 0);
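
The updated tests above drop `MaybeUninit::uninit_array` in favour of one reusable, zero-initialized buffer: each `write_*_leb128` call now returns the number of bytes it produced, and the test copies `&buf[..n]` into the stream. A self-contained sketch of that calling convention is below; `write_u32_leb128` and `read_u32_leb128` are hand-written stand-ins, not the macro-generated rustc_serialize functions — only the convention matches.

    // Sketch of the "fixed buffer + returned length" convention the tests now use.
    fn write_u32_leb128(buf: &mut [u8; 5], mut value: u32) -> usize {
        let mut n = 0;
        loop {
            let byte = (value & 0x7f) as u8;
            value >>= 7;
            buf[n] = if value == 0 { byte } else { byte | 0x80 };
            n += 1;
            if value == 0 {
                return n;
            }
        }
    }

    fn read_u32_leb128(bytes: &[u8]) -> (u32, usize) {
        let mut result = 0u32;
        let mut shift = 0;
        for (i, &byte) in bytes.iter().enumerate() {
            result |= u32::from(byte & 0x7f) << shift;
            if byte & 0x80 == 0 {
                return (result, i + 1);
            }
            shift += 7;
        }
        panic!("unterminated LEB128 value");
    }

    fn main() {
        let values = [0u32, 1, 127, 128, 300, u32::MAX];
        let mut stream = Vec::new();
        // One buffer, reused across iterations, exactly like the updated tests.
        let mut buf = [0u8; 5];
        for &x in &values {
            let n = write_u32_leb128(&mut buf, x);
            stream.extend(&buf[..n]);
        }
        // Decode the stream back and check the round trip.
        let mut pos = 0;
        for &expected in &values {
            let (got, used) = read_u32_leb128(&stream[pos..]);
            assert_eq!(got, expected);
            pos += used;
        }
        assert_eq!(pos, stream.len());
    }
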
diff --git a/compiler/rustc_session/messages.ftl b/compiler/rustc_session/messages.ftl
index b07c6db59..e06b63809 100644
--- a/compiler/rustc_session/messages.ftl
+++ b/compiler/rustc_session/messages.ftl
@@ -8,6 +8,9 @@ session_cannot_mix_and_match_sanitizers = `-Zsanitizer={$first}` is incompatible
session_cgu_not_recorded =
CGU-reuse for `{$cgu_user_name}` is (mangled: `{$cgu_name}`) was not recorded
+session_cli_feature_diagnostic_help =
+ add `-Zcrate-attr="feature({$feature})"` to the command-line options to enable
+
session_crate_name_does_not_match = `--crate-name` and `#[crate_name]` are required to match, but `{$s}` != `{$name}`
session_crate_name_empty = crate name must not be empty
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index f00472f18..d29ab02c1 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -381,6 +381,24 @@ pub enum DebugInfo {
Full,
}
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum DebugInfoCompression {
+ None,
+ Zlib,
+ Zstd,
+}
+
+impl ToString for DebugInfoCompression {
+ fn to_string(&self) -> String {
+ match self {
+ DebugInfoCompression::None => "none",
+ DebugInfoCompression::Zlib => "zlib",
+ DebugInfoCompression::Zstd => "zstd",
+ }
+ .to_owned()
+ }
+}
+
/// Split debug-information is enabled by `-C split-debuginfo`, this enum is only used if split
/// debug-information is enabled (in either `Packed` or `Unpacked` modes), and the platform
/// uses DWARF for debug-information.
@@ -880,6 +898,9 @@ impl OutFileName {
#[derive(Clone, Hash, Debug, HashStable_Generic)]
pub struct OutputFilenames {
pub out_directory: PathBuf,
+ /// Crate name. Never contains '-'.
+ crate_stem: String,
+ /// Typically based on `.rs` input file name. Any '-' is preserved.
filestem: String,
pub single_output_file: Option<OutFileName>,
pub temps_directory: Option<PathBuf>,
@@ -893,6 +914,7 @@ pub const DWARF_OBJECT_EXT: &str = "dwo";
impl OutputFilenames {
pub fn new(
out_directory: PathBuf,
+ out_crate_name: String,
out_filestem: String,
single_output_file: Option<OutFileName>,
temps_directory: Option<PathBuf>,
@@ -904,6 +926,7 @@ impl OutputFilenames {
single_output_file,
temps_directory,
outputs,
+ crate_stem: format!("{out_crate_name}{extra}"),
filestem: format!("{out_filestem}{extra}"),
}
}
@@ -920,7 +943,12 @@ impl OutputFilenames {
/// should be placed on disk.
pub fn output_path(&self, flavor: OutputType) -> PathBuf {
let extension = flavor.extension();
- self.with_directory_and_extension(&self.out_directory, extension)
+ match flavor {
+ OutputType::Metadata => {
+ self.out_directory.join(format!("lib{}.{}", self.crate_stem, extension))
+ }
+ _ => self.with_directory_and_extension(&self.out_directory, extension),
+ }
}
/// Gets the path where a compilation artifact of the given type for the
@@ -1015,6 +1043,7 @@ impl Default for Options {
crate_types: Vec::new(),
optimize: OptLevel::No,
debuginfo: DebugInfo::None,
+ debuginfo_compression: DebugInfoCompression::None,
lint_opts: Vec::new(),
lint_cap: None,
describe_lints: false,
@@ -1067,7 +1096,7 @@ impl Options {
/// Returns `true` if there will be an output file generated.
pub fn will_create_output_file(&self) -> bool {
!self.unstable_opts.parse_only && // The file is just being parsed
- !self.unstable_opts.ls // The file is just being queried
+ self.unstable_opts.ls.is_empty() // The file is just being queried
}
#[inline]
@@ -1084,12 +1113,6 @@ impl Options {
pub fn get_symbol_mangling_version(&self) -> SymbolManglingVersion {
self.cg.symbol_mangling_version.unwrap_or(SymbolManglingVersion::Legacy)
}
-
- #[allow(rustc::bad_opt_access)]
- pub fn incremental_relative_spans(&self) -> bool {
- self.unstable_opts.incremental_relative_spans
- || (self.unstable_features.is_nightly_build() && self.incremental.is_some())
- }
}
impl UnstableOptions {
@@ -2160,12 +2183,6 @@ fn collect_print_requests(
prints.extend(matches.opt_strs("print").into_iter().map(|req| {
let (req, out) = split_out_file_name(&req);
- if out.is_some() && !unstable_opts.unstable_options {
- handler.early_error(
- "the `-Z unstable-options` flag must also be passed to \
- enable the path print option",
- );
- }
let kind = match PRINT_KINDS.iter().find(|&&(name, _)| name == req) {
Some((_, PrintKind::TargetSpec)) => {
if unstable_opts.unstable_options {
@@ -2283,6 +2300,13 @@ fn select_debuginfo(matches: &getopts::Matches, cg: &CodegenOptions) -> DebugInf
if max_g > max_c { DebugInfo::Full } else { cg.debuginfo }
}
+fn select_debuginfo_compression(
+ _handler: &EarlyErrorHandler,
+ unstable_opts: &UnstableOptions,
+) -> DebugInfoCompression {
+ unstable_opts.debuginfo_compression
+}
+
pub(crate) fn parse_assert_incr_state(
handler: &EarlyErrorHandler,
opt_assertion: &Option<String>,
@@ -2451,6 +2475,19 @@ pub fn parse_externs(
Some((opts, name)) => (Some(opts), name.to_string()),
};
+ if !crate::utils::is_ascii_ident(&name) {
+ let mut error = handler.early_struct_error(format!(
+ "crate name `{name}` passed to `--extern` is not a valid ASCII identifier"
+ ));
+ let adjusted_name = name.replace("-", "_");
+ if crate::utils::is_ascii_ident(&adjusted_name) {
+ error.help(format!(
+ "consider replacing the dashes with underscores: `{adjusted_name}`"
+ ));
+ }
+ error.emit();
+ }
+
let path = path.map(|p| CanonicalizedPath::new(p));
let entry = externs.entry(name.to_owned());
@@ -2758,6 +2795,8 @@ pub fn build_session_options(
// for more details.
let debug_assertions = cg.debug_assertions.unwrap_or(opt_level == OptLevel::No);
let debuginfo = select_debuginfo(matches, &cg);
+ let debuginfo_compression: DebugInfoCompression =
+ select_debuginfo_compression(handler, &unstable_opts);
let mut search_paths = vec![];
for s in &matches.opt_strs("L") {
@@ -2834,6 +2873,7 @@ pub fn build_session_options(
crate_types,
optimize: opt_level,
debuginfo,
+ debuginfo_compression,
lint_opts,
lint_cap,
describe_lints,
@@ -2959,6 +2999,7 @@ pub mod nightly_options {
) {
let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options");
let really_allows_unstable_options = match_is_nightly_build(matches);
+ let mut nightly_options_on_stable = 0;
for opt in flags.iter() {
if opt.stability == OptionStability::Stable {
@@ -2979,20 +3020,27 @@ pub mod nightly_options {
}
match opt.stability {
OptionStability::Unstable => {
+ nightly_options_on_stable += 1;
let msg = format!(
"the option `{}` is only accepted on the nightly compiler",
opt.name
);
let _ = handler.early_error_no_abort(msg);
- handler.early_note("selecting a toolchain with `+toolchain` arguments require a rustup proxy; see <https://rust-lang.github.io/rustup/concepts/index.html>");
- handler.early_help(
- "consider switching to a nightly toolchain: `rustup default nightly`",
- );
- handler.early_note("for more information about Rust's stability policy, see <https://doc.rust-lang.org/book/appendix-07-nightly-rust.html#unstable-features>");
}
OptionStability::Stable => {}
}
}
+ if nightly_options_on_stable > 0 {
+ handler
+ .early_help("consider switching to a nightly toolchain: `rustup default nightly`");
+ handler.early_note("selecting a toolchain with `+toolchain` arguments require a rustup proxy; see <https://rust-lang.github.io/rustup/concepts/index.html>");
+ handler.early_note("for more information about Rust's stability policy, see <https://doc.rust-lang.org/book/appendix-07-nightly-rust.html#unstable-features>");
+ handler.early_error(format!(
+ "{} nightly option{} were parsed",
+ nightly_options_on_stable,
+ if nightly_options_on_stable > 1 { "s" } else { "" }
+ ));
+ }
}
}
@@ -3119,11 +3167,11 @@ impl PpMode {
/// how the hash should be calculated when adding a new command-line argument.
pub(crate) mod dep_tracking {
use super::{
- BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, ErrorOutputType,
- InstrumentCoverage, InstrumentXRay, LdImpl, LinkerPluginLto, LocationDetail, LtoCli,
- OomStrategy, OptLevel, OutFileName, OutputType, OutputTypes, Passes, ResolveDocLinks,
- SourceFileHashAlgorithm, SplitDwarfKind, SwitchWithOptPath, SymbolManglingVersion,
- TraitSolver, TrimmedDefPaths,
+ BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, DebugInfoCompression,
+ ErrorOutputType, InstrumentCoverage, InstrumentXRay, LdImpl, LinkerPluginLto,
+ LocationDetail, LtoCli, OomStrategy, OptLevel, OutFileName, OutputType, OutputTypes,
+ Passes, ResolveDocLinks, SourceFileHashAlgorithm, SplitDwarfKind, SwitchWithOptPath,
+ SymbolManglingVersion, TraitSolver, TrimmedDefPaths,
};
use crate::lint;
use crate::options::WasiExecModel;
@@ -3201,6 +3249,7 @@ pub(crate) mod dep_tracking {
OptLevel,
LtoCli,
DebugInfo,
+ DebugInfoCompression,
UnstableFeatures,
NativeLib,
NativeLibKind,
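
Among the `config.rs` changes, `parse_treat_err_as_bug` now distinguishes three cases instead of silently treating any parse failure as "unset": a positive number sets the threshold, an explicit `0` is accepted but leaves the option disabled (only `IntErrorKind::Zero` is tolerated as a parse error), and anything else is rejected. The standalone sketch below reproduces that decision table with the `NonZeroUsize`/`IntErrorKind` types from std and a tiny test harness; it is not wired into any options table.

    use std::num::{IntErrorKind, NonZeroUsize};

    // Sketch of the parse_treat_err_as_bug logic: Some(n > 0) enables the option,
    // an explicit "0" is accepted but leaves it disabled, garbage is a parse error.
    fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<&str>) -> bool {
        match v {
            Some(s) => match s.parse::<NonZeroUsize>() {
                Ok(val) => {
                    *slot = Some(val);
                    true
                }
                Err(e) => {
                    *slot = None;
                    // "0" parses to IntErrorKind::Zero and is still considered valid input.
                    e.kind() == &IntErrorKind::Zero
                }
            },
            // Bare `-Ztreat-err-as-bug` means "abort on the first error".
            None => {
                *slot = NonZeroUsize::new(1);
                true
            }
        }
    }

    fn main() {
        let mut slot = None;
        assert!(parse_treat_err_as_bug(&mut slot, Some("3")));
        assert_eq!(slot, NonZeroUsize::new(3));

        assert!(parse_treat_err_as_bug(&mut slot, Some("0"))); // accepted, but disables
        assert_eq!(slot, None);

        assert!(!parse_treat_err_as_bug(&mut slot, Some("many"))); // rejected
        assert!(parse_treat_err_as_bug(&mut slot, None));
        assert_eq!(slot, NonZeroUsize::new(1));
    }
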
diff --git a/compiler/rustc_session/src/cstore.rs b/compiler/rustc_session/src/cstore.rs
index c53a355b5..d816842b0 100644
--- a/compiler/rustc_session/src/cstore.rs
+++ b/compiler/rustc_session/src/cstore.rs
@@ -7,7 +7,7 @@ use crate::utils::NativeLibKind;
use crate::Session;
use rustc_ast as ast;
use rustc_data_structures::owned_slice::OwnedSlice;
-use rustc_data_structures::sync::{self, AppendOnlyIndexVec, RwLock};
+use rustc_data_structures::sync::{self, AppendOnlyIndexVec, FreezeLock};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, StableCrateId, LOCAL_CRATE};
use rustc_hir::definitions::{DefKey, DefPath, DefPathHash, Definitions};
use rustc_span::hygiene::{ExpnHash, ExpnId};
@@ -258,8 +258,8 @@ pub trait CrateStore: std::fmt::Debug {
pub type CrateStoreDyn = dyn CrateStore + sync::DynSync + sync::DynSend;
pub struct Untracked {
- pub cstore: RwLock<Box<CrateStoreDyn>>,
+ pub cstore: FreezeLock<Box<CrateStoreDyn>>,
/// Reference span for definitions.
pub source_span: AppendOnlyIndexVec<LocalDefId, Span>,
- pub definitions: RwLock<Definitions>,
+ pub definitions: FreezeLock<Definitions>,
}
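
The `cstore.rs` hunk swaps `RwLock` for `FreezeLock` around `cstore` and `definitions`. The discipline these hunks suggest is that the data is mutated only during an early phase and can be treated as immutable once frozen, at which point readers no longer need to take a lock. The toy, std-only illustration below shows that discipline; it is an assumption-laden sketch and not rustc's `rustc_data_structures::sync::FreezeLock` (which has a different, more careful API).

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

    // Toy freeze-lock: writable behind an RwLock until `freeze` is called, then
    // writes panic. The RwLock on reads is kept only to keep the sketch safe and
    // simple; the point of freezing is that post-freeze reads could skip it.
    struct ToyFreezeLock<T> {
        frozen: AtomicBool,
        data: RwLock<T>,
    }

    impl<T> ToyFreezeLock<T> {
        fn new(value: T) -> Self {
            ToyFreezeLock { frozen: AtomicBool::new(false), data: RwLock::new(value) }
        }

        fn read(&self) -> RwLockReadGuard<'_, T> {
            self.data.read().unwrap()
        }

        fn write(&self) -> RwLockWriteGuard<'_, T> {
            // Best-effort check only; a real implementation enforces this properly.
            assert!(!self.frozen.load(Ordering::Acquire), "write after freeze");
            self.data.write().unwrap()
        }

        fn freeze(&self) {
            self.frozen.store(true, Ordering::Release);
        }
    }

    fn main() {
        let defs = ToyFreezeLock::new(vec!["crate_root".to_string()]);
        defs.write().push("some_module".to_string());
        defs.freeze();
        assert_eq!(defs.read().len(), 2);
        // defs.write() would now panic: the table is immutable for the rest of the session.
    }
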
diff --git a/compiler/rustc_session/src/errors.rs b/compiler/rustc_session/src/errors.rs
index 78940462b..5f8bbfca8 100644
--- a/compiler/rustc_session/src/errors.rs
+++ b/compiler/rustc_session/src/errors.rs
@@ -57,6 +57,12 @@ pub struct FeatureDiagnosticHelp {
pub feature: Symbol,
}
+#[derive(Subdiagnostic)]
+#[help(session_cli_feature_diagnostic_help)]
+pub struct CliFeatureDiagnosticHelp {
+ pub feature: Symbol,
+}
+
#[derive(Diagnostic)]
#[diag(session_not_circumvent_feature)]
pub struct NotCircumventFeature;
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
index a270817f3..d6c746a7b 100644
--- a/compiler/rustc_session/src/lib.rs
+++ b/compiler/rustc_session/src/lib.rs
@@ -10,7 +10,7 @@
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_macros;
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
index 055ab2d9c..c1424db60 100644
--- a/compiler/rustc_session/src/options.rs
+++ b/compiler/rustc_session/src/options.rs
@@ -20,7 +20,7 @@ use std::collections::BTreeMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
-use std::num::NonZeroUsize;
+use std::num::{IntErrorKind, NonZeroUsize};
use std::path::PathBuf;
use std::str;
@@ -139,6 +139,7 @@ top_level_options!(
/// can influence whether overflow checks are done or not.
debug_assertions: bool [TRACKED],
debuginfo: DebugInfo [TRACKED],
+ debuginfo_compression: DebugInfoCompression [TRACKED],
lint_opts: Vec<(String, lint::Level)> [TRACKED_NO_CRATE_HASH],
lint_cap: Option<lint::Level> [TRACKED_NO_CRATE_HASH],
describe_lints: bool [UNTRACKED],
@@ -376,6 +377,7 @@ mod desc {
"either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`";
pub const parse_cfprotection: &str = "`none`|`no`|`n` (default), `branch`, `return`, or `full`|`yes`|`y` (equivalent to `branch` and `return`)";
pub const parse_debuginfo: &str = "either an integer (0, 1, 2), `none`, `line-directives-only`, `line-tables-only`, `limited`, or `full`";
+ pub const parse_debuginfo_compression: &str = "one of `none`, `zlib`, or `zstd`";
pub const parse_strip: &str = "either `none`, `debuginfo`, or `symbols`";
pub const parse_linker_flavor: &str = ::rustc_target::spec::LinkerFlavorCli::one_of();
pub const parse_optimization_fuel: &str = "crate=integer";
@@ -385,7 +387,7 @@ mod desc {
"`all` (default), `except-unused-generics`, `except-unused-functions`, or `off`";
pub const parse_instrument_xray: &str = "either a boolean (`yes`, `no`, `on`, `off`, etc), or a comma separated list of settings: `always` or `never` (mutually exclusive), `ignore-loops`, `instruction-threshold=N`, `skip-entry`, `skip-exit`";
pub const parse_unpretty: &str = "`string` or `string=string`";
- pub const parse_treat_err_as_bug: &str = "either no value or a number bigger than 0";
+ pub const parse_treat_err_as_bug: &str = "either no value or a non-negative number";
pub const parse_trait_solver: &str =
"one of the supported solver modes (`classic`, `next`, or `next-coherence`)";
pub const parse_lto: &str =
@@ -782,6 +784,19 @@ mod parse {
true
}
+ pub(crate) fn parse_debuginfo_compression(
+ slot: &mut DebugInfoCompression,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ Some("none") => *slot = DebugInfoCompression::None,
+ Some("zlib") => *slot = DebugInfoCompression::Zlib,
+ Some("zstd") => *slot = DebugInfoCompression::Zstd,
+ _ => return false,
+ };
+ true
+ }
+
pub(crate) fn parse_linker_flavor(slot: &mut Option<LinkerFlavorCli>, v: Option<&str>) -> bool {
match v.and_then(LinkerFlavorCli::from_str) {
Some(lf) => *slot = Some(lf),
@@ -971,10 +986,16 @@ mod parse {
pub(crate) fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<&str>) -> bool {
match v {
- Some(s) => {
- *slot = s.parse().ok();
- slot.is_some()
- }
+ Some(s) => match s.parse() {
+ Ok(val) => {
+ *slot = Some(val);
+ true
+ }
+ Err(e) => {
+ *slot = None;
+ e.kind() == &IntErrorKind::Zero
+ }
+ },
None => {
*slot = NonZeroUsize::new(1);
true
@@ -1424,6 +1445,8 @@ options! {
"emit discriminators and other data necessary for AutoFDO"),
debug_macros: bool = (false, parse_bool, [TRACKED],
"emit line numbers debug info inside macros (default: no)"),
+ debuginfo_compression: DebugInfoCompression = (DebugInfoCompression::None, parse_debuginfo_compression, [TRACKED],
+ "compress debug info sections (none, zlib, zstd, default: none)"),
deduplicate_diagnostics: bool = (true, parse_bool, [UNTRACKED],
"deduplicate identical diagnostics (default: yes)"),
dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED],
@@ -1435,17 +1458,11 @@ options! {
dont_buffer_diagnostics: bool = (false, parse_bool, [UNTRACKED],
"emit diagnostics rather than buffering (breaks NLL error downgrading, sorting) \
(default: no)"),
- drop_tracking: bool = (false, parse_bool, [TRACKED],
- "enables drop tracking in generators (default: no)"),
- drop_tracking_mir: bool = (false, parse_bool, [TRACKED],
- "enables drop tracking on MIR in generators (default: no)"),
dual_proc_macros: bool = (false, parse_bool, [TRACKED],
"load proc macros for both target and host, but only link to the target (default: no)"),
dump_dep_graph: bool = (false, parse_bool, [UNTRACKED],
"dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv) \
(default: no)"),
- dump_drop_tracking_cfg: Option<String> = (None, parse_opt_string, [UNTRACKED],
- "dump drop-tracking control-flow graph as a `.dot` file (default: no)"),
dump_mir: Option<String> = (None, parse_opt_string, [UNTRACKED],
"dump MIR state to file.
`val` is used to select which passes and functions to dump. For example:
@@ -1461,15 +1478,12 @@ options! {
dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
"exclude the pass number when dumping MIR (used in tests) (default: no)"),
dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED],
- "in addition to `.mir` files, create graphviz `.dot` files (and with \
- `-Z instrument-coverage`, also create a `.dot` file for the MIR-derived \
- coverage graph) (default: no)"),
+ "in addition to `.mir` files, create graphviz `.dot` files (default: no)"),
dump_mir_spanview: Option<MirSpanview> = (None, parse_mir_spanview, [UNTRACKED],
"in addition to `.mir` files, create `.html` files to view spans for \
all `statement`s (including terminators), only `terminator` spans, or \
computed `block` spans (one span encompassing a block's terminator and \
- all statements). If `-Z instrument-coverage` is also enabled, create \
- an additional `.html` file showing the computed coverage spans."),
+ all statements)."),
dump_mono_stats: SwitchWithOptPath = (SwitchWithOptPath::Disabled,
parse_switch_with_opt_path, [UNTRACKED],
"output statistics about monomorphization collection"),
@@ -1519,14 +1533,13 @@ options! {
"generate human-readable, predictable names for codegen units (default: no)"),
identify_regions: bool = (false, parse_bool, [UNTRACKED],
"display unnamed regions as `'<id>`, using a non-ident unique id (default: no)"),
+ ignore_directory_in_diagnostics_source_blocks: Vec<String> = (Vec::new(), parse_string_push, [UNTRACKED],
+ "do not display the source code block in diagnostics for files in the directory"),
incremental_ignore_spans: bool = (false, parse_bool, [TRACKED],
"ignore spans during ICH computation -- used for testing (default: no)"),
incremental_info: bool = (false, parse_bool, [UNTRACKED],
"print high-level information about incremental reuse (or the lack thereof) \
(default: no)"),
- #[rustc_lint_opt_deny_field_access("use `Session::incremental_relative_spans` instead of this field")]
- incremental_relative_spans: bool = (false, parse_bool, [TRACKED],
- "hash spans relative to their parent item for incr. comp. (default: no)"),
incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
"verify incr. comp. hashes of green query instances (default: no)"),
inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
@@ -1580,8 +1593,9 @@ options! {
"what location details should be tracked when using caller_location, either \
`none`, or a comma separated list of location details, for which \
valid options are `file`, `line`, and `column` (default: `file,line,column`)"),
- ls: bool = (false, parse_bool, [UNTRACKED],
- "list the symbols defined by a library crate (default: no)"),
+ ls: Vec<String> = (Vec::new(), parse_list, [UNTRACKED],
+ "decode and print various parts of the crate metadata for a library crate \
+ (space separated)"),
macro_backtrace: bool = (false, parse_bool, [UNTRACKED],
"show macro backtraces (default: no)"),
maximal_hir_to_mir_coverage: bool = (false, parse_bool, [TRACKED],
@@ -1631,6 +1645,8 @@ options! {
"run LLVM in non-parallel mode (while keeping codegen-units and ThinLTO)"),
no_profiler_runtime: bool = (false, parse_no_flag, [TRACKED],
"prevent automatic injection of the profiler_builtins crate"),
+ no_trait_vptr: bool = (false, parse_no_flag, [TRACKED],
+ "disable generation of trait vptr in vtable for upcasting"),
no_unique_section_names: bool = (false, parse_bool, [TRACKED],
"do not use unique names for text and data sections when -Z function-sections is used"),
normalize_docs: bool = (false, parse_bool, [TRACKED],
@@ -1830,7 +1846,8 @@ written to standard error output)"),
trap_unreachable: Option<bool> = (None, parse_opt_bool, [TRACKED],
"generate trap instructions for unreachable intrinsics (default: use target setting, usually yes)"),
treat_err_as_bug: Option<NonZeroUsize> = (None, parse_treat_err_as_bug, [TRACKED],
- "treat error number `val` that occurs as bug"),
+ "treat the `val`th error that occurs as bug (default if not specified: 0 - don't treat errors as bugs. \
+ default if specified without a value: 1 - treat the first error as bug)"),
trim_diagnostic_paths: bool = (true, parse_bool, [UNTRACKED],
"in diagnostics, use heuristics to shorten paths referring to items"),
tune_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs
index c0884fb21..7a57b0621 100644
--- a/compiler/rustc_session/src/output.rs
+++ b/compiler/rustc_session/src/output.rs
@@ -119,26 +119,11 @@ pub fn validate_crate_name(sess: &Session, s: Symbol, sp: Option<Span>) {
}
}
-pub fn filename_for_metadata(
- sess: &Session,
- crate_name: Symbol,
- outputs: &OutputFilenames,
-) -> OutFileName {
- // If the command-line specified the path, use that directly.
- if let Some(Some(out_filename)) = sess.opts.output_types.get(&OutputType::Metadata) {
- return out_filename.clone();
- }
-
- let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename);
-
- let out_filename = outputs.single_output_file.clone().unwrap_or_else(|| {
- OutFileName::Real(outputs.out_directory.join(&format!("lib{libname}.rmeta")))
- });
-
+pub fn filename_for_metadata(sess: &Session, outputs: &OutputFilenames) -> OutFileName {
+ let out_filename = outputs.path(OutputType::Metadata);
if let OutFileName::Real(ref path) = out_filename {
check_file_is_writeable(path, sess);
}
-
out_filename
}
diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs
index 1cf63e9b7..671204c0d 100644
--- a/compiler/rustc_session/src/parse.rs
+++ b/compiler/rustc_session/src/parse.rs
@@ -2,7 +2,9 @@
//! It also serves as an input to the parser itself.
use crate::config::CheckCfg;
-use crate::errors::{FeatureDiagnosticForIssue, FeatureDiagnosticHelp, FeatureGateError};
+use crate::errors::{
+ CliFeatureDiagnosticHelp, FeatureDiagnosticForIssue, FeatureDiagnosticHelp, FeatureGateError,
+};
use crate::lint::{
builtin::UNSTABLE_SYNTAX_PRE_EXPANSION, BufferedEarlyLint, BuiltinLintDiagnostics, Lint, LintId,
};
@@ -110,7 +112,7 @@ pub fn feature_err_issue(
}
let mut err = sess.create_err(FeatureGateError { span, explain: explain.into() });
- add_feature_diagnostics_for_issue(&mut err, sess, feature, issue);
+ add_feature_diagnostics_for_issue(&mut err, sess, feature, issue, false);
err
}
@@ -139,7 +141,7 @@ pub fn feature_warn_issue(
explain: &'static str,
) {
let mut err = sess.span_diagnostic.struct_span_warn(span, explain);
- add_feature_diagnostics_for_issue(&mut err, sess, feature, issue);
+ add_feature_diagnostics_for_issue(&mut err, sess, feature, issue, false);
// Decorate this as a future-incompatibility lint as in rustc_middle::lint::struct_lint_level
let lint = UNSTABLE_SYNTAX_PRE_EXPANSION;
@@ -158,7 +160,7 @@ pub fn feature_warn_issue(
/// Adds the diagnostics for a feature to an existing error.
pub fn add_feature_diagnostics(err: &mut Diagnostic, sess: &ParseSess, feature: Symbol) {
- add_feature_diagnostics_for_issue(err, sess, feature, GateIssue::Language);
+ add_feature_diagnostics_for_issue(err, sess, feature, GateIssue::Language, false);
}
/// Adds the diagnostics for a feature to an existing error.
@@ -171,6 +173,7 @@ pub fn add_feature_diagnostics_for_issue(
sess: &ParseSess,
feature: Symbol,
issue: GateIssue,
+ feature_from_cli: bool,
) {
if let Some(n) = find_feature_issue(feature, issue) {
err.subdiagnostic(FeatureDiagnosticForIssue { n });
@@ -178,7 +181,11 @@ pub fn add_feature_diagnostics_for_issue(
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
if sess.unstable_features.is_nightly_build() {
- err.subdiagnostic(FeatureDiagnosticHelp { feature });
+ if feature_from_cli {
+ err.subdiagnostic(CliFeatureDiagnosticHelp { feature });
+ } else {
+ err.subdiagnostic(FeatureDiagnosticHelp { feature });
+ }
}
}
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
index 086ce4e69..b484978ee 100644
--- a/compiler/rustc_session/src/session.rs
+++ b/compiler/rustc_session/src/session.rs
@@ -17,10 +17,10 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::jobserver::{self, Client};
use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef};
use rustc_data_structures::sync::{
- self, AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst,
+ AtomicU64, AtomicUsize, Lock, Lrc, OneThread, Ordering, Ordering::SeqCst,
};
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
-use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
+use rustc_errors::emitter::{DynEmitter, EmitterWriter, HumanReadableErrorType};
use rustc_errors::json::JsonEmitter;
use rustc_errors::registry::Registry;
use rustc_errors::{
@@ -204,6 +204,12 @@ pub struct Session {
/// The version of the rustc process, possibly including a commit hash and description.
pub cfg_version: &'static str,
+
+ /// All commandline args used to invoke the compiler, with @file args fully expanded.
+ /// This will only be used within debug info, e.g. in the pdb file on windows
+ /// This is mainly useful for other tools that reads that debuginfo to figure out
+ /// how to call the compiler with the same arguments.
+ pub expanded_args: Vec<String>,
}
pub struct PerfStats {
@@ -1251,7 +1257,7 @@ fn default_emitter(
source_map: Lrc<SourceMap>,
bundle: Option<Lrc<FluentBundle>>,
fallback_bundle: LazyFallbackBundle,
-) -> Box<dyn Emitter + sync::Send> {
+) -> Box<DynEmitter> {
let macro_backtrace = sopts.unstable_opts.macro_backtrace;
let track_diagnostics = sopts.unstable_opts.track_diagnostics;
let terminal_url = match sopts.unstable_opts.terminal_urls {
@@ -1289,7 +1295,10 @@ fn default_emitter(
.diagnostic_width(sopts.diagnostic_width)
.macro_backtrace(macro_backtrace)
.track_diagnostics(track_diagnostics)
- .terminal_url(terminal_url);
+ .terminal_url(terminal_url)
+ .ignored_directories_in_source_blocks(
+ sopts.unstable_opts.ignore_directory_in_diagnostics_source_blocks.clone(),
+ );
Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
}
}
@@ -1306,7 +1315,10 @@ fn default_emitter(
track_diagnostics,
terminal_url,
)
- .ui_testing(sopts.unstable_opts.ui_testing),
+ .ui_testing(sopts.unstable_opts.ui_testing)
+ .ignored_directories_in_source_blocks(
+ sopts.unstable_opts.ignore_directory_in_diagnostics_source_blocks.clone(),
+ ),
),
}
}
@@ -1325,6 +1337,7 @@ pub fn build_session(
target_override: Option<Target>,
cfg_version: &'static str,
ice_file: Option<PathBuf>,
+ expanded_args: Vec<String>,
) -> Session {
// FIXME: This is not general enough to make the warning lint completely override
// normal diagnostic warnings, since the warning lint can also be denied and changed
@@ -1467,6 +1480,7 @@ pub fn build_session(
target_features: Default::default(),
unstable_target_features: Default::default(),
cfg_version,
+ expanded_args,
};
validate_commandline_args_with_session_available(&sess);
@@ -1712,17 +1726,26 @@ impl EarlyErrorHandler {
#[allow(rustc::untranslatable_diagnostic)]
#[allow(rustc::diagnostic_outside_of_impl)]
+ pub(crate) fn early_struct_error(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, !> {
+ self.handler.struct_fatal(msg)
+ }
+
+ #[allow(rustc::untranslatable_diagnostic)]
+ #[allow(rustc::diagnostic_outside_of_impl)]
pub fn early_warn(&self, msg: impl Into<DiagnosticMessage>) {
self.handler.struct_warn(msg).emit()
}
}
-fn mk_emitter(output: ErrorOutputType) -> Box<dyn Emitter + sync::Send + 'static> {
+fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> {
// FIXME(#100717): early errors aren't translated at the moment, so this is fine, but it will
// need to reference every crate that might emit an early error for translation to work.
let fallback_bundle =
fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false);
- let emitter: Box<dyn Emitter + sync::Send> = match output {
+ let emitter: Box<DynEmitter> = match output {
config::ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip();
Box::new(EmitterWriter::stderr(color_config, fallback_bundle).short_message(short))
diff --git a/compiler/rustc_session/src/utils.rs b/compiler/rustc_session/src/utils.rs
index 71f2591fe..aea7c6c28 100644
--- a/compiler/rustc_session/src/utils.rs
+++ b/compiler/rustc_session/src/utils.rs
@@ -111,3 +111,59 @@ impl CanonicalizedPath {
&self.original
}
}
+
+/// Gets a list of extra command-line flags provided by the user, as strings.
+///
+/// This function is used during ICEs to show more information useful for
+/// debugging, since some ICEs only happen with non-default compiler flags
+/// (and the users don't always report them).
+pub fn extra_compiler_flags() -> Option<(Vec<String>, bool)> {
+ const ICE_REPORT_COMPILER_FLAGS: &[&str] = &["-Z", "-C", "--crate-type"];
+
+ const ICE_REPORT_COMPILER_FLAGS_EXCLUDE: &[&str] = &["metadata", "extra-filename"];
+
+ const ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE: &[&str] = &["incremental"];
+
+ let mut args = std::env::args_os().map(|arg| arg.to_string_lossy().to_string()).peekable();
+
+ let mut result = Vec::new();
+ let mut excluded_cargo_defaults = false;
+ while let Some(arg) = args.next() {
+ if let Some(a) = ICE_REPORT_COMPILER_FLAGS.iter().find(|a| arg.starts_with(*a)) {
+ let content = if arg.len() == a.len() {
+ // A space-separated option, like `-C incremental=foo` or `--crate-type rlib`
+ match args.next() {
+ Some(arg) => arg.to_string(),
+ None => continue,
+ }
+ } else if arg.get(a.len()..a.len() + 1) == Some("=") {
+ // An equals option, like `--crate-type=rlib`
+ arg[a.len() + 1..].to_string()
+ } else {
+ // A non-space option, like `-Cincremental=foo`
+ arg[a.len()..].to_string()
+ };
+ let option = content.split_once('=').map(|s| s.0).unwrap_or(&content);
+ if ICE_REPORT_COMPILER_FLAGS_EXCLUDE.iter().any(|exc| option == *exc) {
+ excluded_cargo_defaults = true;
+ } else {
+ result.push(a.to_string());
+ match ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.iter().find(|s| option == **s) {
+ Some(s) => result.push(format!("{s}=[REDACTED]")),
+ None => result.push(content),
+ }
+ }
+ }
+ }
+
+ if !result.is_empty() { Some((result, excluded_cargo_defaults)) } else { None }
+}
+
+pub(crate) fn is_ascii_ident(string: &str) -> bool {
+ let mut chars = string.chars();
+ if let Some(start) = chars.next() && (start.is_ascii_alphabetic() || start == '_') {
+ chars.all(|char| char.is_ascii_alphanumeric() || char == '_')
+ } else {
+ false
+ }
+}
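
`extra_compiler_flags` above has to recognize the three spellings a flag can take on the command line: a space-separated value (`-C incremental=foo`), an `=`-joined value (`--crate-type=rlib`), and a value glued directly onto the flag (`-Cincremental=foo`). The standalone test below exercises just that splitting logic; `split_flag` is a hypothetical helper written for illustration (no redaction or exclusion lists), not part of rustc_session.

    // Sketch of the three argument forms handled by extra_compiler_flags:
    //   "-C incremental=foo"   -> flag "-C", content "incremental=foo" (next arg)
    //   "--crate-type=rlib"    -> flag "--crate-type", content "rlib"  (after '=')
    //   "-Cincremental=foo"    -> flag "-C", content "incremental=foo" (glued on)
    // Returns the content and whether the following argument was consumed.
    fn split_flag<'a>(flag: &str, arg: &'a str, next: Option<&'a str>) -> Option<(&'a str, bool)> {
        if !arg.starts_with(flag) {
            return None;
        }
        if arg.len() == flag.len() {
            // Space-separated: the value is the following argument.
            next.map(|value| (value, true))
        } else if arg[flag.len()..].starts_with('=') {
            // Equals form: take everything after the '='.
            Some((&arg[flag.len() + 1..], false))
        } else {
            // Glued form: take the rest of this argument.
            Some((&arg[flag.len()..], false))
        }
    }

    fn main() {
        assert_eq!(
            split_flag("-C", "-C", Some("incremental=foo")),
            Some(("incremental=foo", true))
        );
        assert_eq!(split_flag("--crate-type", "--crate-type=rlib", None), Some(("rlib", false)));
        assert_eq!(split_flag("-C", "-Cincremental=foo", None), Some(("incremental=foo", false)));
        assert_eq!(split_flag("-Z", "--edition=2021", None), None);
    }
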
diff --git a/compiler/rustc_smir/Cargo.toml b/compiler/rustc_smir/Cargo.toml
index 80d4e7ed0..4c29f7437 100644
--- a/compiler/rustc_smir/Cargo.toml
+++ b/compiler/rustc_smir/Cargo.toml
@@ -4,18 +4,14 @@ version = "0.0.0"
edition = "2021"
[dependencies]
-# Use optional dependencies for rustc_* in order to support building this crate separately.
-rustc_hir = { path = "../rustc_hir", optional = true }
-rustc_middle = { path = "../rustc_middle", optional = true }
-rustc_span = { path = "../rustc_span", optional = true }
-rustc_target = { path = "../rustc_target", optional = true }
+rustc_hir = { path = "../rustc_hir" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_driver = { path = "../rustc_driver" }
+rustc_interface = { path = "../rustc_interface" }
+rustc_session = {path = "../rustc_session" }
tracing = "0.1"
-scoped-tls = "1.0"
+stable_mir = {path = "../stable_mir" }
[features]
-default = [
- "rustc_hir",
- "rustc_middle",
- "rustc_span",
- "rustc_target",
-]
diff --git a/compiler/rustc_smir/src/lib.rs b/compiler/rustc_smir/src/lib.rs
index 8cb533c8d..b6c36678d 100644
--- a/compiler/rustc_smir/src/lib.rs
+++ b/compiler/rustc_smir/src/lib.rs
@@ -10,26 +10,12 @@
html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
test(attr(allow(unused_variables), deny(warnings)))
)]
-#![cfg_attr(not(feature = "default"), feature(rustc_private))]
+#![feature(rustc_private)]
#![feature(ptr_metadata)]
#![feature(type_alias_impl_trait)] // Used to define opaque types.
#![feature(intra_doc_pointers)]
-// Declare extern rustc_* crates to enable building this crate separately from the compiler.
-#[cfg(not(feature = "default"))]
-extern crate rustc_hir;
-#[cfg(not(feature = "default"))]
-extern crate rustc_middle;
-#[cfg(not(feature = "default"))]
-extern crate rustc_span;
-#[cfg(not(feature = "default"))]
-extern crate rustc_target;
-
pub mod rustc_internal;
-pub mod stable_mir;
// Make this module private for now since external users should not call these directly.
mod rustc_smir;
-
-#[macro_use]
-extern crate scoped_tls;
diff --git a/compiler/rustc_smir/src/rustc_internal/mod.rs b/compiler/rustc_smir/src/rustc_internal/mod.rs
index 078ff6744..1a9dea99f 100644
--- a/compiler/rustc_smir/src/rustc_internal/mod.rs
+++ b/compiler/rustc_smir/src/rustc_internal/mod.rs
@@ -3,83 +3,37 @@
//! For that, we define APIs that will temporarily be public to 3P that exposes rustc internal APIs
//! until stable MIR is complete.
-use std::fmt::Debug;
-use std::string::ToString;
+use std::ops::{ControlFlow, Index};
-use crate::{
- rustc_smir::Tables,
- stable_mir::{self, with},
-};
+use crate::rustc_internal;
+use crate::rustc_smir::Tables;
+use rustc_driver::{Callbacks, Compilation, RunCompiler};
+use rustc_interface::{interface, Queries};
+use rustc_middle::mir::interpret::AllocId;
use rustc_middle::ty::TyCtxt;
pub use rustc_span::def_id::{CrateNum, DefId};
+use rustc_span::Span;
+use stable_mir::CompilerError;
-fn with_tables<R>(mut f: impl FnMut(&mut Tables<'_>) -> R) -> R {
- let mut ret = None;
- with(|tables| tables.rustc_tables(&mut |t| ret = Some(f(t))));
- ret.unwrap()
-}
-
-pub fn item_def_id(item: &stable_mir::CrateItem) -> DefId {
- with_tables(|t| t.item_def_id(item))
-}
-
-pub fn crate_item(did: DefId) -> stable_mir::CrateItem {
- with_tables(|t| t.crate_item(did))
-}
-
-pub fn adt_def(did: DefId) -> stable_mir::ty::AdtDef {
- with_tables(|t| t.adt_def(did))
-}
-
-pub fn foreign_def(did: DefId) -> stable_mir::ty::ForeignDef {
- with_tables(|t| t.foreign_def(did))
-}
-
-pub fn fn_def(did: DefId) -> stable_mir::ty::FnDef {
- with_tables(|t| t.fn_def(did))
-}
-
-pub fn closure_def(did: DefId) -> stable_mir::ty::ClosureDef {
- with_tables(|t| t.closure_def(did))
-}
-
-pub fn generator_def(did: DefId) -> stable_mir::ty::GeneratorDef {
- with_tables(|t| t.generator_def(did))
-}
-
-pub fn alias_def(did: DefId) -> stable_mir::ty::AliasDef {
- with_tables(|t| t.alias_def(did))
-}
-
-pub fn param_def(did: DefId) -> stable_mir::ty::ParamDef {
- with_tables(|t| t.param_def(did))
-}
+impl<'tcx> Index<stable_mir::DefId> for Tables<'tcx> {
+ type Output = DefId;
-pub fn br_named_def(did: DefId) -> stable_mir::ty::BrNamedDef {
- with_tables(|t| t.br_named_def(did))
+ #[inline(always)]
+ fn index(&self, index: stable_mir::DefId) -> &Self::Output {
+ &self.def_ids[index.0]
+ }
}
-pub fn trait_def(did: DefId) -> stable_mir::ty::TraitDef {
- with_tables(|t| t.trait_def(did))
-}
+impl<'tcx> Index<stable_mir::ty::Span> for Tables<'tcx> {
+ type Output = Span;
-pub fn impl_def(did: DefId) -> stable_mir::ty::ImplDef {
- with_tables(|t| t.impl_def(did))
+ #[inline(always)]
+ fn index(&self, index: stable_mir::ty::Span) -> &Self::Output {
+ &self.spans[index.0]
+ }
}
impl<'tcx> Tables<'tcx> {
- pub fn item_def_id(&self, item: &stable_mir::CrateItem) -> DefId {
- self.def_ids[item.0]
- }
-
- pub fn trait_def_id(&self, trait_def: &stable_mir::ty::TraitDef) -> DefId {
- self.def_ids[trait_def.0]
- }
-
- pub fn impl_trait_def_id(&self, impl_def: &stable_mir::ty::ImplDef) -> DefId {
- self.def_ids[impl_def.0]
- }
-
pub fn crate_item(&mut self, did: DefId) -> stable_mir::CrateItem {
stable_mir::CrateItem(self.create_def_id(did))
}
@@ -120,6 +74,10 @@ impl<'tcx> Tables<'tcx> {
stable_mir::ty::TraitDef(self.create_def_id(did))
}
+ pub fn generic_def(&mut self, did: DefId) -> stable_mir::ty::GenericDef {
+ stable_mir::ty::GenericDef(self.create_def_id(did))
+ }
+
pub fn const_def(&mut self, did: DefId) -> stable_mir::ty::ConstDef {
stable_mir::ty::ConstDef(self.create_def_id(did))
}
@@ -128,16 +86,45 @@ impl<'tcx> Tables<'tcx> {
stable_mir::ty::ImplDef(self.create_def_id(did))
}
+ pub fn region_def(&mut self, did: DefId) -> stable_mir::ty::RegionDef {
+ stable_mir::ty::RegionDef(self.create_def_id(did))
+ }
+
+ pub fn prov(&mut self, aid: AllocId) -> stable_mir::ty::Prov {
+ stable_mir::ty::Prov(self.create_alloc_id(aid))
+ }
+
fn create_def_id(&mut self, did: DefId) -> stable_mir::DefId {
// FIXME: this becomes inefficient when we have too many ids
for (i, &d) in self.def_ids.iter().enumerate() {
if d == did {
- return i;
+ return stable_mir::DefId(i);
}
}
let id = self.def_ids.len();
self.def_ids.push(did);
- id
+ stable_mir::DefId(id)
+ }
+
+ fn create_alloc_id(&mut self, aid: AllocId) -> stable_mir::AllocId {
+ // FIXME: this becomes inefficient when we have too many ids
+ if let Some(i) = self.alloc_ids.iter().position(|a| *a == aid) {
+ return stable_mir::AllocId(i);
+ };
+ let id = self.def_ids.len();
+ self.alloc_ids.push(aid);
+ stable_mir::AllocId(id)
+ }
+
+ pub(crate) fn create_span(&mut self, span: Span) -> stable_mir::ty::Span {
+ for (i, &sp) in self.spans.iter().enumerate() {
+ if sp == span {
+ return stable_mir::ty::Span(i);
+ }
+ }
+ let id = self.spans.len();
+ self.spans.push(span);
+ stable_mir::ty::Span(id)
}
}
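
`create_def_id` and the new `create_alloc_id`/`create_span` above all deduplicate by scanning the whole table, which the FIXME flags as inefficient once the tables grow. A hash-map-backed interner is the usual fix; the std-only sketch below shows the same "return the existing index or push a new entry" contract with O(1) lookups. It is generic and standalone, not tied to `DefId` or `Span` (rustc itself would more likely use `IndexVec` plus `FxHashMap`).

    use std::collections::HashMap;
    use std::hash::Hash;

    // Hash-based interner: same contract as create_def_id/create_span, but the
    // reverse map makes repeated interning O(1) instead of a linear scan.
    struct Interner<T: Clone + Eq + Hash> {
        items: Vec<T>,
        index: HashMap<T, usize>,
    }

    impl<T: Clone + Eq + Hash> Interner<T> {
        fn new() -> Self {
            Interner { items: Vec::new(), index: HashMap::new() }
        }

        fn intern(&mut self, value: T) -> usize {
            if let Some(&i) = self.index.get(&value) {
                return i;
            }
            let i = self.items.len();
            self.index.insert(value.clone(), i);
            self.items.push(value);
            i
        }

        fn get(&self, id: usize) -> &T {
            &self.items[id]
        }
    }

    fn main() {
        let mut spans = Interner::new();
        let a = spans.intern("lib.rs:1:1");
        let b = spans.intern("lib.rs:2:5");
        let a_again = spans.intern("lib.rs:1:1");
        assert_eq!(a, a_again); // deduplicated
        assert_ne!(a, b);
        assert_eq!(*spans.get(b), "lib.rs:2:5");
    }
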
@@ -146,12 +133,67 @@ pub fn crate_num(item: &stable_mir::Crate) -> CrateNum {
}
pub fn run(tcx: TyCtxt<'_>, f: impl FnOnce()) {
- crate::stable_mir::run(Tables { tcx, def_ids: vec![], types: vec![] }, f);
+ stable_mir::run(
+ Tables { tcx, def_ids: vec![], alloc_ids: vec![], spans: vec![], types: vec![] },
+ f,
+ );
+}
+
+pub struct StableMir<B = (), C = ()>
+where
+ B: Send,
+ C: Send,
+{
+ args: Vec<String>,
+ callback: fn(TyCtxt<'_>) -> ControlFlow<B, C>,
+ result: Option<ControlFlow<B, C>>,
+}
+
+impl<B, C> StableMir<B, C>
+where
+ B: Send,
+ C: Send,
+{
+ /// Creates a new `StableMir` instance with the given callback function and arguments.
+ pub fn new(args: Vec<String>, callback: fn(TyCtxt<'_>) -> ControlFlow<B, C>) -> Self {
+ StableMir { args, callback, result: None }
+ }
+
+ /// Runs the compiler against the given target and invokes the callback after analysis.
+ pub fn run(&mut self) -> Result<C, CompilerError<B>> {
+ let compiler_result =
+ rustc_driver::catch_fatal_errors(|| RunCompiler::new(&self.args.clone(), self).run());
+ match (compiler_result, self.result.take()) {
+ (Ok(Ok(())), Some(ControlFlow::Continue(value))) => Ok(value),
+ (Ok(Ok(())), Some(ControlFlow::Break(value))) => Err(CompilerError::Interrupted(value)),
+ (Ok(Ok(_)), None) => Err(CompilerError::Skipped),
+ (Ok(Err(_)), _) => Err(CompilerError::CompilationFailed),
+ (Err(_), _) => Err(CompilerError::ICE),
+ }
+ }
}
-/// A type that provides internal information but that can still be used for debug purpose.
-pub type Opaque = impl Debug + ToString + Clone;
-
-pub(crate) fn opaque<T: Debug>(value: &T) -> Opaque {
- format!("{value:?}")
+impl<B, C> Callbacks for StableMir<B, C>
+where
+ B: Send,
+ C: Send,
+{
+ /// Called after analysis. Return value instructs the compiler whether to
+ /// continue the compilation afterwards (defaults to `Compilation::Continue`)
+ fn after_analysis<'tcx>(
+ &mut self,
+ _compiler: &interface::Compiler,
+ queries: &'tcx Queries<'tcx>,
+ ) -> Compilation {
+ queries.global_ctxt().unwrap().enter(|tcx| {
+ rustc_internal::run(tcx, || {
+ self.result = Some((self.callback)(tcx));
+ });
+ if self.result.as_ref().is_some_and(|val| val.is_continue()) {
+ Compilation::Continue
+ } else {
+ Compilation::Stop
+ }
+ })
+ }
}
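
The new `StableMir` driver runs the compiler with a user callback that returns `ControlFlow<B, C>`, then folds the compiler result and the callback result into one `Result<C, CompilerError<B>>`. That mapping is easy to get wrong, so the sketch below isolates just the table. `DriverOutcome` and `fold_result` are stand-ins invented for this example so it compiles on stable without rustc_driver/rustc_interface; only the shape of the mapping mirrors the diff above.

    use std::ops::ControlFlow;

    #[derive(Debug, PartialEq)]
    enum CompilerError<B> {
        CompilationFailed,
        Interrupted(B),
        Skipped,
        ICE,
    }

    enum DriverOutcome {
        Ok,    // compilation succeeded
        Err,   // compilation reported errors
        Fatal, // a fatal/ICE-style failure was caught
    }

    // Fold the (compiler result, callback result) pair into a single Result,
    // the way StableMir::run does with catch_fatal_errors + self.result.
    fn fold_result<B, C>(
        compiler: DriverOutcome,
        callback: Option<ControlFlow<B, C>>,
    ) -> Result<C, CompilerError<B>> {
        match (compiler, callback) {
            (DriverOutcome::Ok, Some(ControlFlow::Continue(value))) => Ok(value),
            (DriverOutcome::Ok, Some(ControlFlow::Break(value))) => {
                Err(CompilerError::Interrupted(value))
            }
            // The callback never ran (e.g. compilation stopped before analysis).
            (DriverOutcome::Ok, None) => Err(CompilerError::Skipped),
            (DriverOutcome::Err, _) => Err(CompilerError::CompilationFailed),
            (DriverOutcome::Fatal, _) => Err(CompilerError::ICE),
        }
    }

    fn main() {
        let ok: Result<u32, CompilerError<&str>> =
            fold_result(DriverOutcome::Ok, Some(ControlFlow::Continue(42)));
        assert_eq!(ok, Ok(42));

        let stopped: Result<u32, CompilerError<&str>> =
            fold_result(DriverOutcome::Ok, Some(ControlFlow::Break("enough")));
        assert_eq!(stopped, Err(CompilerError::Interrupted("enough")));

        let skipped: Result<u32, CompilerError<&str>> = fold_result(DriverOutcome::Ok, None);
        assert_eq!(skipped, Err(CompilerError::Skipped));

        let failed: Result<u32, CompilerError<&str>> = fold_result(DriverOutcome::Err, None);
        assert_eq!(failed, Err(CompilerError::CompilationFailed));

        let ice: Result<u32, CompilerError<&str>> = fold_result(DriverOutcome::Fatal, None);
        assert_eq!(ice, Err(CompilerError::ICE));
    }
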
diff --git a/compiler/rustc_smir/src/rustc_smir/alloc.rs b/compiler/rustc_smir/src/rustc_smir/alloc.rs
new file mode 100644
index 000000000..63a2a1450
--- /dev/null
+++ b/compiler/rustc_smir/src/rustc_smir/alloc.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::{
+ interpret::{alloc_range, AllocRange, Pointer},
+ ConstValue,
+};
+
+use crate::rustc_smir::{Stable, Tables};
+use stable_mir::mir::Mutability;
+use stable_mir::ty::{Allocation, ProvenanceMap};
+
+/// Creates new empty `Allocation` from given `Align`.
+fn new_empty_allocation(align: rustc_target::abi::Align) -> Allocation {
+ Allocation {
+ bytes: Vec::new(),
+ provenance: ProvenanceMap { ptrs: Vec::new() },
+ align: align.bytes(),
+ mutability: Mutability::Not,
+ }
+}
+
+// We need this method instead of a `Stable` implementation because we need the
+// `Ty` of the const we are trying to create; for that we need access to
+// `ConstantKind`, which we cannot reach from inside a `Stable` impl.
+#[allow(rustc::usage_of_qualified_ty)]
+pub fn new_allocation<'tcx>(
+ ty: rustc_middle::ty::Ty<'tcx>,
+ const_value: ConstValue<'tcx>,
+ tables: &mut Tables<'tcx>,
+) -> Allocation {
+ match const_value {
+ ConstValue::Scalar(scalar) => {
+ let size = scalar.size();
+ let align = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty))
+ .unwrap()
+ .align;
+ let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi);
+ allocation
+ .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar)
+ .unwrap();
+ allocation.stable(tables)
+ }
+ ConstValue::ZeroSized => {
+ let align =
+ tables.tcx.layout_of(rustc_middle::ty::ParamEnv::empty().and(ty)).unwrap().align;
+ new_empty_allocation(align.abi)
+ }
+ ConstValue::Slice { data, meta } => {
+ let alloc_id = tables.tcx.reserve_and_set_memory_alloc(data);
+ let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::ZERO);
+ let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx);
+ let scalar_meta =
+ rustc_middle::mir::interpret::Scalar::from_target_usize(meta, &tables.tcx);
+ let layout =
+ tables.tcx.layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty)).unwrap();
+ let mut allocation =
+ rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi);
+ allocation
+ .write_scalar(
+ &tables.tcx,
+ alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size),
+ scalar_ptr,
+ )
+ .unwrap();
+ allocation
+ .write_scalar(
+ &tables.tcx,
+ alloc_range(tables.tcx.data_layout.pointer_size, scalar_meta.size()),
+ scalar_meta,
+ )
+ .unwrap();
+ allocation.stable(tables)
+ }
+ ConstValue::Indirect { alloc_id, offset } => {
+ let alloc = tables.tcx.global_alloc(alloc_id).unwrap_memory();
+ let ty_size = tables
+ .tcx
+ .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(ty))
+ .unwrap()
+ .size;
+ allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables)
+ }
+ }
+}
+
+/// Creates an `Allocation` only from information within the `AllocRange`.
+pub(super) fn allocation_filter<'tcx>(
+ alloc: &rustc_middle::mir::interpret::Allocation,
+ alloc_range: AllocRange,
+ tables: &mut Tables<'tcx>,
+) -> Allocation {
+ let mut bytes: Vec<Option<u8>> = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(
+ alloc_range.start.bytes_usize()..alloc_range.end().bytes_usize(),
+ )
+ .iter()
+ .copied()
+ .map(Some)
+ .collect();
+ for (i, b) in bytes.iter_mut().enumerate() {
+ if !alloc
+ .init_mask()
+ .get(rustc_target::abi::Size::from_bytes(i + alloc_range.start.bytes_usize()))
+ {
+ *b = None;
+ }
+ }
+ let mut ptrs = Vec::new();
+ for (offset, prov) in alloc
+ .provenance()
+ .ptrs()
+ .iter()
+ .filter(|a| a.0 >= alloc_range.start && a.0 <= alloc_range.end())
+ {
+ ptrs.push((offset.bytes_usize() - alloc_range.start.bytes_usize(), tables.prov(*prov)));
+ }
+ Allocation {
+ bytes: bytes,
+ provenance: ProvenanceMap { ptrs },
+ align: alloc.align.bytes(),
+ mutability: alloc.mutability.stable(tables),
+ }
+}
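
`allocation_filter` above copies the raw bytes of an interpreter allocation into a `Vec<Option<u8>>`, turning every byte the init mask marks as uninitialized into `None`, and rebases provenance offsets to the start of the requested range. The small std-only sketch below illustrates just the bytes/init-mask part; a plain `Vec<bool>` stands in for rustc's packed `InitMask`, and the range is plain byte indices rather than an `AllocRange`.

    // Sketch of the byte/init-mask translation in allocation_filter: initialized
    // bytes become Some(b), uninitialized ones become None.
    fn filter_bytes(
        bytes: &[u8],
        init_mask: &[bool],
        range: std::ops::Range<usize>,
    ) -> Vec<Option<u8>> {
        assert_eq!(bytes.len(), init_mask.len());
        range.map(|i| if init_mask[i] { Some(bytes[i]) } else { None }).collect()
    }

    fn main() {
        // An 8-byte allocation where bytes 2..4 were never written.
        let bytes = [1u8, 2, 0, 0, 5, 6, 7, 8];
        let init = [true, true, false, false, true, true, true, true];

        // Take the middle of the allocation, the way allocation_filter takes an AllocRange.
        let filtered = filter_bytes(&bytes, &init, 1..6);
        assert_eq!(filtered, vec![Some(2), None, None, Some(5), Some(6)]);
    }
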
diff --git a/compiler/rustc_smir/src/rustc_smir/mod.rs b/compiler/rustc_smir/src/rustc_smir/mod.rs
index 06b37008e..6e83b9601 100644
--- a/compiler/rustc_smir/src/rustc_smir/mod.rs
+++ b/compiler/rustc_smir/src/rustc_smir/mod.rs
@@ -1,5 +1,5 @@
//! Module that implements what will become the rustc side of Stable MIR.
-//!
+
//! This module is responsible for building Stable MIR components from internal components.
//!
//! This module is not intended to be invoked directly by users. It will eventually
@@ -7,21 +7,21 @@
//!
//! For now, we are developing everything inside `rustc`, thus, we keep this module private.
-use crate::rustc_internal::{self, opaque};
-use crate::stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx};
-use crate::stable_mir::ty::{
- allocation_filter, new_allocation, Const, FloatTy, IntTy, Movability, RigidTy, TyKind, UintTy,
-};
-use crate::stable_mir::{self, Context};
+use crate::rustc_smir::hir::def::DefKind;
+use crate::rustc_smir::stable_mir::ty::{BoundRegion, EarlyBoundRegion, Region};
use rustc_hir as hir;
-use rustc_middle::mir::coverage::CodeRegion;
-use rustc_middle::mir::interpret::alloc_range;
-use rustc_middle::mir::{self, ConstantKind};
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{alloc_range, AllocId};
use rustc_middle::ty::{self, Ty, TyCtxt, Variance};
use rustc_span::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc_target::abi::FieldIdx;
+use stable_mir::mir::{CopyNonOverlapping, UserTypeProjection, VariantIdx};
+use stable_mir::ty::{FloatTy, GenericParamDef, IntTy, Movability, RigidTy, Span, TyKind, UintTy};
+use stable_mir::{self, opaque, Context};
use tracing::debug;
+mod alloc;
+
impl<'tcx> Context for Tables<'tcx> {
fn local_crate(&self) -> stable_mir::Crate {
smir_crate(self.tcx, LOCAL_CRATE)
@@ -38,9 +38,26 @@ impl<'tcx> Context for Tables<'tcx> {
})
}
+ fn name_of_def_id(&self, def_id: stable_mir::DefId) -> String {
+ self.tcx.def_path_str(self[def_id])
+ }
+
+ fn print_span(&self, span: stable_mir::ty::Span) -> String {
+ self.tcx.sess.source_map().span_to_diagnostic_string(self[span])
+ }
+
+ fn def_kind(&mut self, def_id: stable_mir::DefId) -> stable_mir::DefKind {
+ self.tcx.def_kind(self[def_id]).stable(self)
+ }
+
+ fn span_of_an_item(&mut self, def_id: stable_mir::DefId) -> Span {
+ self.tcx.def_span(self[def_id]).stable(self)
+ }
+
fn all_local_items(&mut self) -> stable_mir::CrateItems {
self.tcx.mir_keys(()).iter().map(|item| self.crate_item(item.to_def_id())).collect()
}
+
fn entry_fn(&mut self) -> Option<stable_mir::CrateItem> {
Some(self.crate_item(self.tcx.entry_fn(())?.0))
}
@@ -54,7 +71,7 @@ impl<'tcx> Context for Tables<'tcx> {
}
fn trait_decl(&mut self, trait_def: &stable_mir::ty::TraitDef) -> stable_mir::ty::TraitDecl {
- let def_id = self.trait_def_id(trait_def);
+ let def_id = self[trait_def.0];
let trait_def = self.tcx.trait_def(def_id);
trait_def.stable(self)
}
@@ -68,14 +85,14 @@ impl<'tcx> Context for Tables<'tcx> {
}
fn trait_impl(&mut self, impl_def: &stable_mir::ty::ImplDef) -> stable_mir::ty::ImplTrait {
- let def_id = self.impl_trait_def_id(impl_def);
+ let def_id = self[impl_def.0];
let impl_trait = self.tcx.impl_trait_ref(def_id).unwrap();
impl_trait.stable(self)
}
- fn mir_body(&mut self, item: &stable_mir::CrateItem) -> stable_mir::mir::Body {
- let def_id = self.item_def_id(item);
- let mir = self.tcx.optimized_mir(def_id);
+ fn mir_body(&mut self, item: stable_mir::DefId) -> stable_mir::mir::Body {
+ let def_id = self[item];
+ let mir = self.tcx.instance_mir(ty::InstanceDef::Item(def_id));
stable_mir::mir::Body {
blocks: mir
.basic_blocks
@@ -93,29 +110,96 @@ impl<'tcx> Context for Tables<'tcx> {
}
}
- fn rustc_tables(&mut self, f: &mut dyn FnMut(&mut Tables<'_>)) {
- f(self)
+ fn ty_kind(&mut self, ty: stable_mir::ty::Ty) -> TyKind {
+ self.types[ty.0].clone().stable(self)
+ }
+
+ fn mk_ty(&mut self, kind: TyKind) -> stable_mir::ty::Ty {
+ let n = self.types.len();
+ self.types.push(MaybeStable::Stable(kind));
+ stable_mir::ty::Ty(n)
+ }
+
+ fn generics_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::Generics {
+ let def_id = self[def_id];
+ let generics = self.tcx.generics_of(def_id);
+ generics.stable(self)
+ }
+
+ fn predicates_of(&mut self, def_id: stable_mir::DefId) -> stable_mir::ty::GenericPredicates {
+ let def_id = self[def_id];
+ let ty::GenericPredicates { parent, predicates } = self.tcx.predicates_of(def_id);
+ stable_mir::ty::GenericPredicates {
+ parent: parent.map(|did| self.trait_def(did)),
+ predicates: predicates
+ .iter()
+ .map(|(clause, span)| {
+ (clause.as_predicate().kind().skip_binder().stable(self), span.stable(self))
+ })
+ .collect(),
+ }
+ }
+
+ fn explicit_predicates_of(
+ &mut self,
+ def_id: stable_mir::DefId,
+ ) -> stable_mir::ty::GenericPredicates {
+ let def_id = self[def_id];
+ let ty::GenericPredicates { parent, predicates } = self.tcx.explicit_predicates_of(def_id);
+ stable_mir::ty::GenericPredicates {
+ parent: parent.map(|did| self.trait_def(did)),
+ predicates: predicates
+ .iter()
+ .map(|(clause, span)| {
+ (clause.as_predicate().kind().skip_binder().stable(self), span.stable(self))
+ })
+ .collect(),
+ }
+ }
+}
+
+#[derive(Clone)]
+pub enum MaybeStable<S, R> {
+ Stable(S),
+ Rustc(R),
+}
+
+impl<'tcx, S, R> MaybeStable<S, R> {
+ fn stable(self, tables: &mut Tables<'tcx>) -> S
+ where
+ R: Stable<'tcx, T = S>,
+ {
+ match self {
+ MaybeStable::Stable(s) => s,
+ MaybeStable::Rustc(r) => r.stable(tables),
+ }
}
+}
- fn ty_kind(&mut self, ty: crate::stable_mir::ty::Ty) -> TyKind {
- let ty = self.types[ty.0];
- ty.stable(self)
+impl<S, R: PartialEq> PartialEq<R> for MaybeStable<S, R> {
+ fn eq(&self, other: &R) -> bool {
+ match self {
+ MaybeStable::Stable(_) => false,
+ MaybeStable::Rustc(r) => r == other,
+ }
}
}
pub struct Tables<'tcx> {
pub tcx: TyCtxt<'tcx>,
pub def_ids: Vec<DefId>,
- pub types: Vec<Ty<'tcx>>,
+ pub alloc_ids: Vec<AllocId>,
+ pub spans: Vec<rustc_span::Span>,
+ pub types: Vec<MaybeStable<stable_mir::ty::TyKind, Ty<'tcx>>>,
}
impl<'tcx> Tables<'tcx> {
fn intern_ty(&mut self, ty: Ty<'tcx>) -> stable_mir::ty::Ty {
- if let Some(id) = self.types.iter().position(|&t| t == ty) {
+ if let Some(id) = self.types.iter().position(|t| *t == ty) {
return stable_mir::ty::Ty(id);
}
let id = self.types.len();
- self.types.push(ty);
+ self.types.push(MaybeStable::Rustc(ty));
stable_mir::ty::Ty(id)
}
}
@@ -168,10 +252,7 @@ impl<'tcx> Stable<'tcx> for mir::Statement<'tcx> {
variance: variance.stable(tables),
}
}
- Coverage(coverage) => stable_mir::mir::Statement::Coverage(stable_mir::mir::Coverage {
- kind: coverage.kind.stable(tables),
- code_region: coverage.code_region.as_ref().map(|reg| reg.stable(tables)),
- }),
+ Coverage(coverage) => stable_mir::mir::Statement::Coverage(opaque(coverage)),
Intrinsic(intrinstic) => {
stable_mir::mir::Statement::Intrinsic(intrinstic.stable(tables))
}
@@ -188,17 +269,16 @@ impl<'tcx> Stable<'tcx> for mir::Rvalue<'tcx> {
match self {
Use(op) => stable_mir::mir::Rvalue::Use(op.stable(tables)),
Repeat(op, len) => {
- let cnst = ConstantKind::from_const(*len, tables.tcx);
- let len = Const { literal: cnst.stable(tables) };
+ let len = len.stable(tables);
stable_mir::mir::Rvalue::Repeat(op.stable(tables), len)
}
Ref(region, kind, place) => stable_mir::mir::Rvalue::Ref(
- opaque(region),
+ region.stable(tables),
kind.stable(tables),
place.stable(tables),
),
ThreadLocalRef(def_id) => {
- stable_mir::mir::Rvalue::ThreadLocalRef(rustc_internal::crate_item(*def_id))
+ stable_mir::mir::Rvalue::ThreadLocalRef(tables.crate_item(*def_id))
}
AddressOf(mutability, place) => {
stable_mir::mir::Rvalue::AddressOf(mutability.stable(tables), place.stable(tables))
@@ -255,7 +335,7 @@ impl<'tcx> Stable<'tcx> for mir::BorrowKind {
use mir::BorrowKind::*;
match *self {
Shared => stable_mir::mir::BorrowKind::Shared,
- Shallow => stable_mir::mir::BorrowKind::Shallow,
+ Fake => stable_mir::mir::BorrowKind::Fake,
Mut { kind } => stable_mir::mir::BorrowKind::Mut { kind: kind.stable(tables) },
}
}
@@ -377,8 +457,7 @@ impl<'tcx> Stable<'tcx> for ty::TermKind<'tcx> {
match self {
ty::TermKind::Ty(ty) => TermKind::Type(tables.intern_ty(*ty)),
ty::TermKind::Const(cnst) => {
- let cnst = ConstantKind::from_const(*cnst, tables.tcx);
- let cnst = Const { literal: cnst.stable(tables) };
+ let cnst = cnst.stable(tables);
TermKind::Const(cnst)
}
}
@@ -457,7 +536,19 @@ impl<'tcx> Stable<'tcx> for mir::Operand<'tcx> {
match self {
Copy(place) => stable_mir::mir::Operand::Copy(place.stable(tables)),
Move(place) => stable_mir::mir::Operand::Move(place.stable(tables)),
- Constant(c) => stable_mir::mir::Operand::Constant(c.to_string()),
+ Constant(c) => stable_mir::mir::Operand::Constant(c.stable(tables)),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for mir::ConstOperand<'tcx> {
+ type T = stable_mir::mir::Constant;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ stable_mir::mir::Constant {
+ span: self.span.stable(tables),
+ user_ty: self.user_ty.map(|u| u.as_usize()).or(None),
+ literal: self.const_.stable(tables),
}
}
}
@@ -472,30 +563,6 @@ impl<'tcx> Stable<'tcx> for mir::Place<'tcx> {
}
}
-impl<'tcx> Stable<'tcx> for mir::coverage::CoverageKind {
- type T = stable_mir::mir::CoverageKind;
- fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
- use rustc_middle::mir::coverage::CoverageKind;
- match self {
- CoverageKind::Counter { function_source_hash, id } => {
- stable_mir::mir::CoverageKind::Counter {
- function_source_hash: *function_source_hash as usize,
- id: opaque(id),
- }
- }
- CoverageKind::Expression { id, lhs, op, rhs } => {
- stable_mir::mir::CoverageKind::Expression {
- id: opaque(id),
- lhs: opaque(lhs),
- op: op.stable(tables),
- rhs: opaque(rhs),
- }
- }
- CoverageKind::Unreachable => stable_mir::mir::CoverageKind::Unreachable,
- }
- }
-}
-
impl<'tcx> Stable<'tcx> for mir::UserTypeProjection {
type T = stable_mir::mir::UserTypeProjection;
@@ -504,18 +571,6 @@ impl<'tcx> Stable<'tcx> for mir::UserTypeProjection {
}
}
-impl<'tcx> Stable<'tcx> for mir::coverage::Op {
- type T = stable_mir::mir::Op;
-
- fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
- use rustc_middle::mir::coverage::Op::*;
- match self {
- Subtract => stable_mir::mir::Op::Subtract,
- Add => stable_mir::mir::Op::Add,
- }
- }
-}
-
impl<'tcx> Stable<'tcx> for mir::Local {
type T = stable_mir::mir::Local;
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
@@ -562,20 +617,6 @@ impl<'tcx> Stable<'tcx> for ty::UserTypeAnnotationIndex {
}
}
-impl<'tcx> Stable<'tcx> for CodeRegion {
- type T = stable_mir::mir::CodeRegion;
-
- fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
- stable_mir::mir::CodeRegion {
- file_name: self.file_name.as_str().to_string(),
- start_line: self.start_line as usize,
- start_col: self.start_col as usize,
- end_line: self.end_line as usize,
- end_col: self.end_col as usize,
- }
- }
-}
-
impl<'tcx> Stable<'tcx> for mir::UnwindAction {
type T = stable_mir::mir::UnwindAction;
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
@@ -583,7 +624,7 @@ impl<'tcx> Stable<'tcx> for mir::UnwindAction {
match self {
UnwindAction::Continue => stable_mir::mir::UnwindAction::Continue,
UnwindAction::Unreachable => stable_mir::mir::UnwindAction::Unreachable,
- UnwindAction::Terminate => stable_mir::mir::UnwindAction::Terminate,
+ UnwindAction::Terminate(_) => stable_mir::mir::UnwindAction::Terminate,
UnwindAction::Cleanup(bb) => stable_mir::mir::UnwindAction::Cleanup(bb.as_usize()),
}
}
@@ -700,7 +741,7 @@ impl<'tcx> Stable<'tcx> for mir::AggregateKind<'tcx> {
mir::AggregateKind::Tuple => stable_mir::mir::AggregateKind::Tuple,
mir::AggregateKind::Adt(def_id, var_idx, generic_arg, user_ty_index, field_idx) => {
stable_mir::mir::AggregateKind::Adt(
- rustc_internal::adt_def(*def_id),
+ tables.adt_def(*def_id),
var_idx.index(),
generic_arg.stable(tables),
user_ty_index.map(|idx| idx.index()),
@@ -709,13 +750,13 @@ impl<'tcx> Stable<'tcx> for mir::AggregateKind<'tcx> {
}
mir::AggregateKind::Closure(def_id, generic_arg) => {
stable_mir::mir::AggregateKind::Closure(
- rustc_internal::closure_def(*def_id),
+ tables.closure_def(*def_id),
generic_arg.stable(tables),
)
}
mir::AggregateKind::Generator(def_id, generic_arg, movability) => {
stable_mir::mir::AggregateKind::Generator(
- rustc_internal::generator_def(*def_id),
+ tables.generator_def(*def_id),
generic_arg.stable(tables),
movability.stable(tables),
)
@@ -780,8 +821,8 @@ impl<'tcx> Stable<'tcx> for mir::Terminator<'tcx> {
.collect(),
otherwise: targets.otherwise().as_usize(),
},
- Resume => Terminator::Resume,
- Terminate => Terminator::Abort,
+ UnwindResume => Terminator::Resume,
+ UnwindTerminate(_) => Terminator::Abort,
Return => Terminator::Return,
Unreachable => Terminator::Unreachable,
Drop { place, target, unwind, replace: _ } => Terminator::Drop {
@@ -835,12 +876,9 @@ impl<'tcx> Stable<'tcx> for ty::GenericArgKind<'tcx> {
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use stable_mir::ty::GenericArgKind;
match self {
- ty::GenericArgKind::Lifetime(region) => GenericArgKind::Lifetime(opaque(region)),
+ ty::GenericArgKind::Lifetime(region) => GenericArgKind::Lifetime(region.stable(tables)),
ty::GenericArgKind::Type(ty) => GenericArgKind::Type(tables.intern_ty(*ty)),
- ty::GenericArgKind::Const(cnst) => {
- let cnst = ConstantKind::from_const(*cnst, tables.tcx);
- GenericArgKind::Const(stable_mir::ty::Const { literal: cnst.stable(tables) })
- }
+ ty::GenericArgKind::Const(cnst) => GenericArgKind::Const(cnst.stable(tables)),
}
}
}
@@ -928,13 +966,13 @@ impl<'tcx> Stable<'tcx> for ty::FnSig<'tcx> {
impl<'tcx> Stable<'tcx> for ty::BoundTyKind {
type T = stable_mir::ty::BoundTyKind;
- fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use stable_mir::ty::BoundTyKind;
match self {
ty::BoundTyKind::Anon => BoundTyKind::Anon,
ty::BoundTyKind::Param(def_id, symbol) => {
- BoundTyKind::Param(rustc_internal::param_def(*def_id), symbol.to_string())
+ BoundTyKind::Param(tables.param_def(*def_id), symbol.to_string())
}
}
}
@@ -943,15 +981,13 @@ impl<'tcx> Stable<'tcx> for ty::BoundTyKind {
impl<'tcx> Stable<'tcx> for ty::BoundRegionKind {
type T = stable_mir::ty::BoundRegionKind;
- fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use stable_mir::ty::BoundRegionKind;
match self {
- ty::BoundRegionKind::BrAnon(option_span) => {
- BoundRegionKind::BrAnon(option_span.map(|span| opaque(&span)))
- }
+ ty::BoundRegionKind::BrAnon => BoundRegionKind::BrAnon,
ty::BoundRegionKind::BrNamed(def_id, symbol) => {
- BoundRegionKind::BrNamed(rustc_internal::br_named_def(*def_id), symbol.to_string())
+ BoundRegionKind::BrNamed(tables.br_named_def(*def_id), symbol.to_string())
}
ty::BoundRegionKind::BrEnv => BoundRegionKind::BrEnv,
}
@@ -1038,31 +1074,26 @@ impl<'tcx> Stable<'tcx> for Ty<'tcx> {
ty::Uint(uint_ty) => TyKind::RigidTy(RigidTy::Uint(uint_ty.stable(tables))),
ty::Float(float_ty) => TyKind::RigidTy(RigidTy::Float(float_ty.stable(tables))),
ty::Adt(adt_def, generic_args) => TyKind::RigidTy(RigidTy::Adt(
- rustc_internal::adt_def(adt_def.did()),
+ tables.adt_def(adt_def.did()),
generic_args.stable(tables),
)),
- ty::Foreign(def_id) => {
- TyKind::RigidTy(RigidTy::Foreign(rustc_internal::foreign_def(*def_id)))
- }
+ ty::Foreign(def_id) => TyKind::RigidTy(RigidTy::Foreign(tables.foreign_def(*def_id))),
ty::Str => TyKind::RigidTy(RigidTy::Str),
ty::Array(ty, constant) => {
- let cnst = ConstantKind::from_const(*constant, tables.tcx);
- let cnst = stable_mir::ty::Const { literal: cnst.stable(tables) };
- TyKind::RigidTy(RigidTy::Array(tables.intern_ty(*ty), cnst))
+ TyKind::RigidTy(RigidTy::Array(tables.intern_ty(*ty), constant.stable(tables)))
}
ty::Slice(ty) => TyKind::RigidTy(RigidTy::Slice(tables.intern_ty(*ty))),
ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
TyKind::RigidTy(RigidTy::RawPtr(tables.intern_ty(*ty), mutbl.stable(tables)))
}
ty::Ref(region, ty, mutbl) => TyKind::RigidTy(RigidTy::Ref(
- opaque(region),
+ region.stable(tables),
tables.intern_ty(*ty),
mutbl.stable(tables),
)),
- ty::FnDef(def_id, generic_args) => TyKind::RigidTy(RigidTy::FnDef(
- rustc_internal::fn_def(*def_id),
- generic_args.stable(tables),
- )),
+ ty::FnDef(def_id, generic_args) => {
+ TyKind::RigidTy(RigidTy::FnDef(tables.fn_def(*def_id), generic_args.stable(tables)))
+ }
ty::FnPtr(poly_fn_sig) => TyKind::RigidTy(RigidTy::FnPtr(poly_fn_sig.stable(tables))),
ty::Dynamic(existential_predicates, region, dyn_kind) => {
TyKind::RigidTy(RigidTy::Dynamic(
@@ -1070,16 +1101,16 @@ impl<'tcx> Stable<'tcx> for Ty<'tcx> {
.iter()
.map(|existential_predicate| existential_predicate.stable(tables))
.collect(),
- opaque(region),
+ region.stable(tables),
dyn_kind.stable(tables),
))
}
ty::Closure(def_id, generic_args) => TyKind::RigidTy(RigidTy::Closure(
- rustc_internal::closure_def(*def_id),
+ tables.closure_def(*def_id),
generic_args.stable(tables),
)),
ty::Generator(def_id, generic_args, movability) => TyKind::RigidTy(RigidTy::Generator(
- rustc_internal::generator_def(*def_id),
+ tables.generator_def(*def_id),
generic_args.stable(tables),
movability.stable(tables),
)),
@@ -1094,17 +1125,54 @@ impl<'tcx> Stable<'tcx> for Ty<'tcx> {
ty::Bound(debruijn_idx, bound_ty) => {
TyKind::Bound(debruijn_idx.as_usize(), bound_ty.stable(tables))
}
- ty::Placeholder(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(_, _)
- | ty::Infer(_)
- | ty::Error(_) => {
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) | ty::Error(_) => {
unreachable!();
}
}
}
}
+impl<'tcx> Stable<'tcx> for ty::Const<'tcx> {
+ type T = stable_mir::ty::Const;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ stable_mir::ty::Const {
+ literal: match self.kind() {
+ ty::Value(val) => {
+ let const_val = tables.tcx.valtree_to_const_val((self.ty(), val));
+ stable_mir::ty::ConstantKind::Allocated(alloc::new_allocation(
+ self.ty(),
+ const_val,
+ tables,
+ ))
+ }
+ ty::ParamCt(param) => stable_mir::ty::ConstantKind::Param(param.stable(tables)),
+ ty::ErrorCt(_) => unreachable!(),
+ ty::InferCt(_) => unreachable!(),
+ ty::BoundCt(_, _) => unimplemented!(),
+ ty::PlaceholderCt(_) => unimplemented!(),
+ ty::Unevaluated(uv) => {
+ stable_mir::ty::ConstantKind::Unevaluated(stable_mir::ty::UnevaluatedConst {
+ def: tables.const_def(uv.def),
+ args: uv.args.stable(tables),
+ promoted: None,
+ })
+ }
+ ty::ExprCt(_) => unimplemented!(),
+ },
+ ty: tables.intern_ty(self.ty()),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ParamConst {
+ type T = stable_mir::ty::ParamConst;
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::ParamConst;
+ ParamConst { index: self.index, name: self.name.to_string() }
+ }
+}
+
impl<'tcx> Stable<'tcx> for ty::ParamTy {
type T = stable_mir::ty::ParamTy;
fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
@@ -1125,7 +1193,11 @@ impl<'tcx> Stable<'tcx> for mir::interpret::Allocation {
type T = stable_mir::ty::Allocation;
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
- allocation_filter(self, alloc_range(rustc_target::abi::Size::ZERO, self.size()), tables)
+ alloc::allocation_filter(
+ self,
+ alloc_range(rustc_target::abi::Size::ZERO, self.size()),
+ tables,
+ )
}
}
@@ -1150,7 +1222,7 @@ impl<'tcx> Stable<'tcx> for ty::TraitDef {
use stable_mir::ty::TraitDecl;
TraitDecl {
- def_id: rustc_internal::trait_def(self.def_id),
+ def_id: tables.trait_def(self.def_id),
unsafety: self.unsafety.stable(tables),
paren_sugar: self.paren_sugar,
has_auto_impl: self.has_auto_impl,
@@ -1168,31 +1240,28 @@ impl<'tcx> Stable<'tcx> for ty::TraitDef {
}
}
-impl<'tcx> Stable<'tcx> for rustc_middle::mir::ConstantKind<'tcx> {
- type T = stable_mir::ty::ConstantKind;
+impl<'tcx> Stable<'tcx> for rustc_middle::mir::Const<'tcx> {
+ type T = stable_mir::ty::Const;
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
- match self {
- ConstantKind::Ty(c) => match c.kind() {
- ty::Value(val) => {
- let const_val = tables.tcx.valtree_to_const_val((c.ty(), val));
- stable_mir::ty::ConstantKind::Allocated(new_allocation(self, const_val, tables))
- }
- ty::ParamCt(param) => stable_mir::ty::ConstantKind::ParamCt(opaque(&param)),
- ty::ErrorCt(_) => unreachable!(),
- _ => unimplemented!(),
+ match *self {
+ mir::Const::Ty(c) => c.stable(tables),
+ mir::Const::Unevaluated(unev_const, ty) => stable_mir::ty::Const {
+ literal: stable_mir::ty::ConstantKind::Unevaluated(
+ stable_mir::ty::UnevaluatedConst {
+ def: tables.const_def(unev_const.def),
+ args: unev_const.args.stable(tables),
+ promoted: unev_const.promoted.map(|u| u.as_u32()),
+ },
+ ),
+ ty: tables.intern_ty(ty),
+ },
+ mir::Const::Val(val, ty) => stable_mir::ty::Const {
+ literal: stable_mir::ty::ConstantKind::Allocated(alloc::new_allocation(
+ ty, val, tables,
+ )),
+ ty: tables.intern_ty(ty),
},
- ConstantKind::Unevaluated(unev_const, ty) => {
- stable_mir::ty::ConstantKind::Unevaluated(stable_mir::ty::UnevaluatedConst {
- ty: tables.intern_ty(*ty),
- def: tables.const_def(unev_const.def),
- args: unev_const.args.stable(tables),
- promoted: unev_const.promoted.map(|u| u.as_u32()),
- })
- }
- ConstantKind::Val(val, _) => {
- stable_mir::ty::ConstantKind::Allocated(new_allocation(self, *val, tables))
- }
}
}
}
@@ -1202,6 +1271,285 @@ impl<'tcx> Stable<'tcx> for ty::TraitRef<'tcx> {
fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
use stable_mir::ty::TraitRef;
- TraitRef { def_id: rustc_internal::trait_def(self.def_id), args: self.args.stable(tables) }
+ TraitRef { def_id: tables.trait_def(self.def_id), args: self.args.stable(tables) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::Generics {
+ type T = stable_mir::ty::Generics;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::Generics;
+
+ let params: Vec<_> = self.params.iter().map(|param| param.stable(tables)).collect();
+ let param_def_id_to_index =
+ params.iter().map(|param| (param.def_id, param.index)).collect();
+
+ Generics {
+ parent: self.parent.map(|did| tables.generic_def(did)),
+ parent_count: self.parent_count,
+ params,
+ param_def_id_to_index,
+ has_self: self.has_self,
+ has_late_bound_regions: self
+ .has_late_bound_regions
+ .as_ref()
+ .map(|late_bound_regions| late_bound_regions.stable(tables)),
+ host_effect_index: self.host_effect_index,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_middle::ty::GenericParamDefKind {
+ type T = stable_mir::ty::GenericParamDefKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::GenericParamDefKind;
+ match self {
+ ty::GenericParamDefKind::Lifetime => GenericParamDefKind::Lifetime,
+ ty::GenericParamDefKind::Type { has_default, synthetic } => {
+ GenericParamDefKind::Type { has_default: *has_default, synthetic: *synthetic }
+ }
+ ty::GenericParamDefKind::Const { has_default, is_host_effect: _ } => {
+ GenericParamDefKind::Const { has_default: *has_default }
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_middle::ty::GenericParamDef {
+ type T = stable_mir::ty::GenericParamDef;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ GenericParamDef {
+ name: self.name.to_string(),
+ def_id: tables.generic_def(self.def_id),
+ index: self.index,
+ pure_wrt_drop: self.pure_wrt_drop,
+ kind: self.kind.stable(tables),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::PredicateKind<'tcx> {
+ type T = stable_mir::ty::PredicateKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use ty::PredicateKind;
+ match self {
+ PredicateKind::Clause(clause_kind) => {
+ stable_mir::ty::PredicateKind::Clause(clause_kind.stable(tables))
+ }
+ PredicateKind::ObjectSafe(did) => {
+ stable_mir::ty::PredicateKind::ObjectSafe(tables.trait_def(*did))
+ }
+ PredicateKind::ClosureKind(did, generic_args, closure_kind) => {
+ stable_mir::ty::PredicateKind::ClosureKind(
+ tables.closure_def(*did),
+ generic_args.stable(tables),
+ closure_kind.stable(tables),
+ )
+ }
+ PredicateKind::Subtype(subtype_predicate) => {
+ stable_mir::ty::PredicateKind::SubType(subtype_predicate.stable(tables))
+ }
+ PredicateKind::Coerce(coerce_predicate) => {
+ stable_mir::ty::PredicateKind::Coerce(coerce_predicate.stable(tables))
+ }
+ PredicateKind::ConstEquate(a, b) => {
+ stable_mir::ty::PredicateKind::ConstEquate(a.stable(tables), b.stable(tables))
+ }
+ PredicateKind::Ambiguous => stable_mir::ty::PredicateKind::Ambiguous,
+ PredicateKind::AliasRelate(a, b, alias_relation_direction) => {
+ stable_mir::ty::PredicateKind::AliasRelate(
+ a.unpack().stable(tables),
+ b.unpack().stable(tables),
+ alias_relation_direction.stable(tables),
+ )
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ClauseKind<'tcx> {
+ type T = stable_mir::ty::ClauseKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use ty::ClauseKind::*;
+ match *self {
+ Trait(trait_object) => stable_mir::ty::ClauseKind::Trait(trait_object.stable(tables)),
+ RegionOutlives(region_outlives) => {
+ stable_mir::ty::ClauseKind::RegionOutlives(region_outlives.stable(tables))
+ }
+ TypeOutlives(type_outlives) => {
+ let ty::OutlivesPredicate::<_, _>(a, b) = type_outlives;
+ stable_mir::ty::ClauseKind::TypeOutlives(stable_mir::ty::OutlivesPredicate(
+ tables.intern_ty(a),
+ b.stable(tables),
+ ))
+ }
+ Projection(projection_predicate) => {
+ stable_mir::ty::ClauseKind::Projection(projection_predicate.stable(tables))
+ }
+ ConstArgHasType(const_, ty) => stable_mir::ty::ClauseKind::ConstArgHasType(
+ const_.stable(tables),
+ tables.intern_ty(ty),
+ ),
+ WellFormed(generic_arg) => {
+ stable_mir::ty::ClauseKind::WellFormed(generic_arg.unpack().stable(tables))
+ }
+ ConstEvaluatable(const_) => {
+ stable_mir::ty::ClauseKind::ConstEvaluatable(const_.stable(tables))
+ }
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ClosureKind {
+ type T = stable_mir::ty::ClosureKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use ty::ClosureKind::*;
+ match self {
+ Fn => stable_mir::ty::ClosureKind::Fn,
+ FnMut => stable_mir::ty::ClosureKind::FnMut,
+ FnOnce => stable_mir::ty::ClosureKind::FnOnce,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::SubtypePredicate<'tcx> {
+ type T = stable_mir::ty::SubtypePredicate;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::SubtypePredicate { a, b, a_is_expected: _ } = self;
+ stable_mir::ty::SubtypePredicate { a: tables.intern_ty(*a), b: tables.intern_ty(*b) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::CoercePredicate<'tcx> {
+ type T = stable_mir::ty::CoercePredicate;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::CoercePredicate { a, b } = self;
+ stable_mir::ty::CoercePredicate { a: tables.intern_ty(*a), b: tables.intern_ty(*b) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::AliasRelationDirection {
+ type T = stable_mir::ty::AliasRelationDirection;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use ty::AliasRelationDirection::*;
+ match self {
+ Equate => stable_mir::ty::AliasRelationDirection::Equate,
+ Subtype => stable_mir::ty::AliasRelationDirection::Subtype,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::TraitPredicate<'tcx> {
+ type T = stable_mir::ty::TraitPredicate;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::TraitPredicate { trait_ref, polarity } = self;
+ stable_mir::ty::TraitPredicate {
+ trait_ref: trait_ref.stable(tables),
+ polarity: polarity.stable(tables),
+ }
+ }
+}
+
+impl<'tcx, A, B, U, V> Stable<'tcx> for ty::OutlivesPredicate<A, B>
+where
+ A: Stable<'tcx, T = U>,
+ B: Stable<'tcx, T = V>,
+{
+ type T = stable_mir::ty::OutlivesPredicate<U, V>;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::OutlivesPredicate(a, b) = self;
+ stable_mir::ty::OutlivesPredicate(a.stable(tables), b.stable(tables))
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ProjectionPredicate<'tcx> {
+ type T = stable_mir::ty::ProjectionPredicate;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ let ty::ProjectionPredicate { projection_ty, term } = self;
+ stable_mir::ty::ProjectionPredicate {
+ projection_ty: projection_ty.stable(tables),
+ term: term.unpack().stable(tables),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::ImplPolarity {
+ type T = stable_mir::ty::ImplPolarity;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ use ty::ImplPolarity::*;
+ match self {
+ Positive => stable_mir::ty::ImplPolarity::Positive,
+ Negative => stable_mir::ty::ImplPolarity::Negative,
+ Reservation => stable_mir::ty::ImplPolarity::Reservation,
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::Region<'tcx> {
+ type T = stable_mir::ty::Region;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ Region { kind: self.kind().stable(tables) }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for ty::RegionKind<'tcx> {
+ type T = stable_mir::ty::RegionKind;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ use stable_mir::ty::RegionKind;
+ match self {
+ ty::ReEarlyBound(early_reg) => RegionKind::ReEarlyBound(EarlyBoundRegion {
+ def_id: tables.region_def(early_reg.def_id),
+ index: early_reg.index,
+ name: early_reg.name.to_string(),
+ }),
+ ty::ReLateBound(db_index, bound_reg) => RegionKind::ReLateBound(
+ db_index.as_u32(),
+ BoundRegion { var: bound_reg.var.as_u32(), kind: bound_reg.kind.stable(tables) },
+ ),
+ ty::ReStatic => RegionKind::ReStatic,
+ ty::RePlaceholder(place_holder) => {
+ RegionKind::RePlaceholder(stable_mir::ty::Placeholder {
+ universe: place_holder.universe.as_u32(),
+ bound: BoundRegion {
+ var: place_holder.bound.var.as_u32(),
+ kind: place_holder.bound.kind.stable(tables),
+ },
+ })
+ }
+ ty::ReErased => RegionKind::ReErased,
+ _ => unreachable!("{self:?}"),
+ }
+ }
+}
+
+impl<'tcx> Stable<'tcx> for rustc_span::Span {
+ type T = stable_mir::ty::Span;
+
+ fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T {
+ tables.create_span(*self)
+ }
+}
+
+impl<'tcx> Stable<'tcx> for DefKind {
+ type T = stable_mir::DefKind;
+
+ fn stable(&self, _: &mut Tables<'tcx>) -> Self::T {
+ // FIXME: add a real implementation of stable DefKind
+ opaque(self)
}
}
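
A note on the `MaybeStable` table introduced above: `Tables` now keeps each interned type either as a raw `Ty<'tcx>` or as an already-converted `TyKind`, converting only when `ty_kind` is queried, while `intern_ty` deduplicates against the raw form. The following self-contained sketch shows the same lazy-conversion pattern with stand-in types (`String` for the stable form, `u32` for the raw form; none of these names come from the patch):

#[allow(dead_code)]
enum Lazy<S, R> {
    Stable(S),
    Raw(R),
}

#[derive(Default)]
struct Table {
    types: Vec<Lazy<String, u32>>,
}

impl Table {
    /// Interns a raw value, reusing an existing slot if the same raw value is present.
    fn intern(&mut self, raw: u32) -> usize {
        if let Some(idx) = self.types.iter().position(|t| matches!(t, Lazy::Raw(r) if *r == raw)) {
            return idx;
        }
        self.types.push(Lazy::Raw(raw));
        self.types.len() - 1
    }

    /// Returns the stable form, converting on demand (the real code calls `.stable(tables)` here).
    fn kind(&self, idx: usize) -> String {
        match &self.types[idx] {
            Lazy::Stable(s) => s.clone(),
            Lazy::Raw(r) => format!("raw:{r}"),
        }
    }
}

fn main() {
    let mut table = Table::default();
    let a = table.intern(7);
    let b = table.intern(7);
    assert_eq!(a, b); // duplicates share one slot, as in `intern_ty`
    println!("{}", table.kind(a));
}
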
diff --git a/compiler/rustc_smir/src/stable_mir/ty.rs b/compiler/rustc_smir/src/stable_mir/ty.rs
deleted file mode 100644
index 7a6601f09..000000000
--- a/compiler/rustc_smir/src/stable_mir/ty.rs
+++ /dev/null
@@ -1,463 +0,0 @@
-use rustc_middle::mir::interpret::{alloc_range, AllocRange, ConstValue, Pointer};
-
-use super::{mir::Mutability, mir::Safety, with, DefId};
-use crate::{
- rustc_internal::{opaque, Opaque},
- rustc_smir::{Stable, Tables},
-};
-
-#[derive(Copy, Clone, Debug)]
-pub struct Ty(pub usize);
-
-impl Ty {
- pub fn kind(&self) -> TyKind {
- with(|context| context.ty_kind(*self))
- }
-}
-
-#[derive(Debug, Clone)]
-pub struct Const {
- pub literal: ConstantKind,
-}
-
-type Ident = Opaque;
-pub(crate) type Region = Opaque;
-type Span = Opaque;
-
-#[derive(Clone, Debug)]
-pub enum TyKind {
- RigidTy(RigidTy),
- Alias(AliasKind, AliasTy),
- Param(ParamTy),
- Bound(usize, BoundTy),
-}
-
-#[derive(Clone, Debug)]
-pub enum RigidTy {
- Bool,
- Char,
- Int(IntTy),
- Uint(UintTy),
- Float(FloatTy),
- Adt(AdtDef, GenericArgs),
- Foreign(ForeignDef),
- Str,
- Array(Ty, Const),
- Slice(Ty),
- RawPtr(Ty, Mutability),
- Ref(Region, Ty, Mutability),
- FnDef(FnDef, GenericArgs),
- FnPtr(PolyFnSig),
- Closure(ClosureDef, GenericArgs),
- Generator(GeneratorDef, GenericArgs, Movability),
- Dynamic(Vec<Binder<ExistentialPredicate>>, Region, DynKind),
- Never,
- Tuple(Vec<Ty>),
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum IntTy {
- Isize,
- I8,
- I16,
- I32,
- I64,
- I128,
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum UintTy {
- Usize,
- U8,
- U16,
- U32,
- U64,
- U128,
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum FloatTy {
- F32,
- F64,
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum Movability {
- Static,
- Movable,
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct ForeignDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct FnDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct ClosureDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct GeneratorDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct ParamDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct BrNamedDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct AdtDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct AliasDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct TraitDef(pub(crate) DefId);
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct ConstDef(pub(crate) DefId);
-
-impl TraitDef {
- pub fn trait_decl(&self) -> TraitDecl {
- with(|cx| cx.trait_decl(self))
- }
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct ImplDef(pub(crate) DefId);
-
-impl ImplDef {
- pub fn trait_impl(&self) -> ImplTrait {
- with(|cx| cx.trait_impl(self))
- }
-}
-
-#[derive(Clone, Debug)]
-pub struct GenericArgs(pub Vec<GenericArgKind>);
-
-#[derive(Clone, Debug)]
-pub enum GenericArgKind {
- Lifetime(Region),
- Type(Ty),
- Const(Const),
-}
-
-#[derive(Clone, Debug)]
-pub enum TermKind {
- Type(Ty),
- Const(Const),
-}
-
-#[derive(Clone, Debug)]
-pub enum AliasKind {
- Projection,
- Inherent,
- Opaque,
- Weak,
-}
-
-#[derive(Clone, Debug)]
-pub struct AliasTy {
- pub def_id: AliasDef,
- pub args: GenericArgs,
-}
-
-pub type PolyFnSig = Binder<FnSig>;
-
-#[derive(Clone, Debug)]
-pub struct FnSig {
- pub inputs_and_output: Vec<Ty>,
- pub c_variadic: bool,
- pub unsafety: Safety,
- pub abi: Abi,
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub enum Abi {
- Rust,
- C { unwind: bool },
- Cdecl { unwind: bool },
- Stdcall { unwind: bool },
- Fastcall { unwind: bool },
- Vectorcall { unwind: bool },
- Thiscall { unwind: bool },
- Aapcs { unwind: bool },
- Win64 { unwind: bool },
- SysV64 { unwind: bool },
- PtxKernel,
- Msp430Interrupt,
- X86Interrupt,
- AmdGpuKernel,
- EfiApi,
- AvrInterrupt,
- AvrNonBlockingInterrupt,
- CCmseNonSecureCall,
- Wasm,
- System { unwind: bool },
- RustIntrinsic,
- RustCall,
- PlatformIntrinsic,
- Unadjusted,
- RustCold,
- RiscvInterruptM,
- RiscvInterruptS,
-}
-
-#[derive(Clone, Debug)]
-pub struct Binder<T> {
- pub value: T,
- pub bound_vars: Vec<BoundVariableKind>,
-}
-
-#[derive(Clone, Debug)]
-pub struct EarlyBinder<T> {
- pub value: T,
-}
-
-#[derive(Clone, Debug)]
-pub enum BoundVariableKind {
- Ty(BoundTyKind),
- Region(BoundRegionKind),
- Const,
-}
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub enum BoundTyKind {
- Anon,
- Param(ParamDef, String),
-}
-
-#[derive(Clone, Debug)]
-pub enum BoundRegionKind {
- BrAnon(Option<Span>),
- BrNamed(BrNamedDef, String),
- BrEnv,
-}
-
-#[derive(Clone, Debug)]
-pub enum DynKind {
- Dyn,
- DynStar,
-}
-
-#[derive(Clone, Debug)]
-pub enum ExistentialPredicate {
- Trait(ExistentialTraitRef),
- Projection(ExistentialProjection),
- AutoTrait(TraitDef),
-}
-
-#[derive(Clone, Debug)]
-pub struct ExistentialTraitRef {
- pub def_id: TraitDef,
- pub generic_args: GenericArgs,
-}
-
-#[derive(Clone, Debug)]
-pub struct ExistentialProjection {
- pub def_id: TraitDef,
- pub generic_args: GenericArgs,
- pub term: TermKind,
-}
-
-#[derive(Clone, Debug)]
-pub struct ParamTy {
- pub index: u32,
- pub name: String,
-}
-
-#[derive(Clone, Debug)]
-pub struct BoundTy {
- pub var: usize,
- pub kind: BoundTyKind,
-}
-
-pub type Bytes = Vec<Option<u8>>;
-pub type Size = usize;
-pub type Prov = Opaque;
-pub type Align = u64;
-pub type Promoted = u32;
-pub type InitMaskMaterialized = Vec<u64>;
-
-/// Stores the provenance information of pointers stored in memory.
-#[derive(Clone, Debug)]
-pub struct ProvenanceMap {
- /// Provenance in this map applies from the given offset for an entire pointer-size worth of
- /// bytes. Two entries in this map are always at least a pointer size apart.
- pub ptrs: Vec<(Size, Prov)>,
-}
-
-#[derive(Clone, Debug)]
-pub struct Allocation {
- pub bytes: Bytes,
- pub provenance: ProvenanceMap,
- pub align: Align,
- pub mutability: Mutability,
-}
-
-impl Allocation {
- /// Creates new empty `Allocation` from given `Align`.
- fn new_empty_allocation(align: rustc_target::abi::Align) -> Allocation {
- Allocation {
- bytes: Vec::new(),
- provenance: ProvenanceMap { ptrs: Vec::new() },
- align: align.bytes(),
- mutability: Mutability::Not,
- }
- }
-}
-
-// We need this method instead of a Stable implementation
-// because we need to get `Ty` of the const we are trying to create, to do that
-// we need to have access to `ConstantKind` but we can't access that inside Stable impl.
-pub fn new_allocation<'tcx>(
- const_kind: &rustc_middle::mir::ConstantKind<'tcx>,
- const_value: ConstValue<'tcx>,
- tables: &mut Tables<'tcx>,
-) -> Allocation {
- match const_value {
- ConstValue::Scalar(scalar) => {
- let size = scalar.size();
- let align = tables
- .tcx
- .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
- .unwrap()
- .align;
- let mut allocation = rustc_middle::mir::interpret::Allocation::uninit(size, align.abi);
- allocation
- .write_scalar(&tables.tcx, alloc_range(rustc_target::abi::Size::ZERO, size), scalar)
- .unwrap();
- allocation.stable(tables)
- }
- ConstValue::ZeroSized => {
- let align = tables
- .tcx
- .layout_of(rustc_middle::ty::ParamEnv::empty().and(const_kind.ty()))
- .unwrap()
- .align;
- Allocation::new_empty_allocation(align.abi)
- }
- ConstValue::Slice { data, start, end } => {
- let alloc_id = tables.tcx.create_memory_alloc(data);
- let ptr = Pointer::new(alloc_id, rustc_target::abi::Size::from_bytes(start));
- let scalar_ptr = rustc_middle::mir::interpret::Scalar::from_pointer(ptr, &tables.tcx);
- let scalar_len = rustc_middle::mir::interpret::Scalar::from_target_usize(
- (end - start) as u64,
- &tables.tcx,
- );
- let layout = tables
- .tcx
- .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
- .unwrap();
- let mut allocation =
- rustc_middle::mir::interpret::Allocation::uninit(layout.size, layout.align.abi);
- allocation
- .write_scalar(
- &tables.tcx,
- alloc_range(rustc_target::abi::Size::ZERO, tables.tcx.data_layout.pointer_size),
- scalar_ptr,
- )
- .unwrap();
- allocation
- .write_scalar(
- &tables.tcx,
- alloc_range(tables.tcx.data_layout.pointer_size, scalar_len.size()),
- scalar_len,
- )
- .unwrap();
- allocation.stable(tables)
- }
- ConstValue::ByRef { alloc, offset } => {
- let ty_size = tables
- .tcx
- .layout_of(rustc_middle::ty::ParamEnv::reveal_all().and(const_kind.ty()))
- .unwrap()
- .size;
- allocation_filter(&alloc.0, alloc_range(offset, ty_size), tables)
- }
- }
-}
-
-/// Creates an `Allocation` only from information within the `AllocRange`.
-pub fn allocation_filter<'tcx>(
- alloc: &rustc_middle::mir::interpret::Allocation,
- alloc_range: AllocRange,
- tables: &mut Tables<'tcx>,
-) -> Allocation {
- let mut bytes: Vec<Option<u8>> = alloc
- .inspect_with_uninit_and_ptr_outside_interpreter(
- alloc_range.start.bytes_usize()..alloc_range.end().bytes_usize(),
- )
- .iter()
- .copied()
- .map(Some)
- .collect();
- for (i, b) in bytes.iter_mut().enumerate() {
- if !alloc
- .init_mask()
- .get(rustc_target::abi::Size::from_bytes(i + alloc_range.start.bytes_usize()))
- {
- *b = None;
- }
- }
- let mut ptrs = Vec::new();
- for (offset, prov) in alloc
- .provenance()
- .ptrs()
- .iter()
- .filter(|a| a.0 >= alloc_range.start && a.0 <= alloc_range.end())
- {
- ptrs.push((offset.bytes_usize() - alloc_range.start.bytes_usize(), opaque(prov)));
- }
- Allocation {
- bytes: bytes,
- provenance: ProvenanceMap { ptrs },
- align: alloc.align.bytes(),
- mutability: alloc.mutability.stable(tables),
- }
-}
-
-#[derive(Clone, Debug)]
-pub enum ConstantKind {
- Allocated(Allocation),
- Unevaluated(UnevaluatedConst),
- ParamCt(Opaque),
-}
-
-#[derive(Clone, Debug)]
-pub struct UnevaluatedConst {
- pub ty: Ty,
- pub def: ConstDef,
- pub args: GenericArgs,
- pub promoted: Option<Promoted>,
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum TraitSpecializationKind {
- None,
- Marker,
- AlwaysApplicable,
-}
-
-#[derive(Clone, Debug)]
-pub struct TraitDecl {
- pub def_id: TraitDef,
- pub unsafety: Safety,
- pub paren_sugar: bool,
- pub has_auto_impl: bool,
- pub is_marker: bool,
- pub is_coinductive: bool,
- pub skip_array_during_method_dispatch: bool,
- pub specialization_kind: TraitSpecializationKind,
- pub must_implement_one_of: Option<Vec<Ident>>,
- pub implement_via_object: bool,
- pub deny_explicit_impl: bool,
-}
-
-pub type ImplTrait = EarlyBinder<TraitRef>;
-
-#[derive(Clone, Debug)]
-pub struct TraitRef {
- pub def_id: TraitDef,
- pub args: GenericArgs,
-}
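
The `allocation_filter` helper deleted here (it moves into the new `alloc` module referenced by the hunks above) copies only the bytes covered by the requested range and reports uninitialized bytes as `None` by consulting the allocation's init mask. A stand-alone sketch of that masking step, using a plain `bool` slice in place of rustc's `InitMask` (hypothetical names, not the real API):

// bytes of an allocation plus a per-byte "initialized" mask, restricted to a byte range ->
// Vec<Option<u8>> where uninitialized bytes are reported as None.
fn masked_bytes(bytes: &[u8], init_mask: &[bool], range: std::ops::Range<usize>) -> Vec<Option<u8>> {
    bytes[range.clone()]
        .iter()
        .zip(&init_mask[range])
        .map(|(b, init)| if *init { Some(*b) } else { None })
        .collect()
}

fn main() {
    let bytes = [0xde, 0xad, 0xbe, 0xef];
    let init = [true, false, true, true];
    assert_eq!(masked_bytes(&bytes, &init, 1..4), vec![None, Some(0xbe), Some(0xef)]);
}
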
diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs
index 26cd54210..450d5455f 100644
--- a/compiler/rustc_span/src/analyze_source_file.rs
+++ b/compiler/rustc_span/src/analyze_source_file.rs
@@ -11,26 +11,19 @@ mod tests;
/// is detected at runtime.
pub fn analyze_source_file(
src: &str,
- source_file_start_pos: BytePos,
-) -> (Vec<BytePos>, Vec<MultiByteChar>, Vec<NonNarrowChar>) {
- let mut lines = vec![source_file_start_pos];
+) -> (Vec<RelativeBytePos>, Vec<MultiByteChar>, Vec<NonNarrowChar>) {
+ let mut lines = vec![RelativeBytePos::from_u32(0)];
let mut multi_byte_chars = vec![];
let mut non_narrow_chars = vec![];
// Calls the right implementation, depending on hardware support available.
- analyze_source_file_dispatch(
- src,
- source_file_start_pos,
- &mut lines,
- &mut multi_byte_chars,
- &mut non_narrow_chars,
- );
+ analyze_source_file_dispatch(src, &mut lines, &mut multi_byte_chars, &mut non_narrow_chars);
// The code above optimistically registers a new line *after* each \n
// it encounters. If that point is already outside the source_file, remove
// it again.
if let Some(&last_line_start) = lines.last() {
- let source_file_end = source_file_start_pos + BytePos::from_usize(src.len());
+ let source_file_end = RelativeBytePos::from_usize(src.len());
assert!(source_file_end >= last_line_start);
if last_line_start == source_file_end {
lines.pop();
@@ -43,14 +36,12 @@ pub fn analyze_source_file(
cfg_if::cfg_if! {
if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
fn analyze_source_file_dispatch(src: &str,
- source_file_start_pos: BytePos,
- lines: &mut Vec<BytePos>,
+ lines: &mut Vec<RelativeBytePos>,
multi_byte_chars: &mut Vec<MultiByteChar>,
non_narrow_chars: &mut Vec<NonNarrowChar>) {
if is_x86_feature_detected!("sse2") {
unsafe {
analyze_source_file_sse2(src,
- source_file_start_pos,
lines,
multi_byte_chars,
non_narrow_chars);
@@ -58,7 +49,7 @@ cfg_if::cfg_if! {
} else {
analyze_source_file_generic(src,
src.len(),
- source_file_start_pos,
+ RelativeBytePos::from_u32(0),
lines,
multi_byte_chars,
non_narrow_chars);
@@ -72,8 +63,7 @@ cfg_if::cfg_if! {
/// SSE2 intrinsics to quickly find all newlines.
#[target_feature(enable = "sse2")]
unsafe fn analyze_source_file_sse2(src: &str,
- output_offset: BytePos,
- lines: &mut Vec<BytePos>,
+ lines: &mut Vec<RelativeBytePos>,
multi_byte_chars: &mut Vec<MultiByteChar>,
non_narrow_chars: &mut Vec<NonNarrowChar>) {
#[cfg(target_arch = "x86")]
@@ -129,8 +119,7 @@ cfg_if::cfg_if! {
if control_char_mask == newlines_mask {
// All control characters are newlines, record them
let mut newlines_mask = 0xFFFF0000 | newlines_mask as u32;
- let output_offset = output_offset +
- BytePos::from_usize(chunk_index * CHUNK_SIZE + 1);
+ let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1);
loop {
let index = newlines_mask.trailing_zeros();
@@ -140,7 +129,7 @@ cfg_if::cfg_if! {
break
}
- lines.push(BytePos(index) + output_offset);
+ lines.push(RelativeBytePos(index) + output_offset);
// Clear the bit, so we can find the next one.
newlines_mask &= (!1) << index;
@@ -165,7 +154,7 @@ cfg_if::cfg_if! {
intra_chunk_offset = analyze_source_file_generic(
&src[scan_start .. ],
CHUNK_SIZE - intra_chunk_offset,
- BytePos::from_usize(scan_start) + output_offset,
+ RelativeBytePos::from_usize(scan_start),
lines,
multi_byte_chars,
non_narrow_chars
@@ -177,7 +166,7 @@ cfg_if::cfg_if! {
if tail_start < src.len() {
analyze_source_file_generic(&src[tail_start ..],
src.len() - tail_start,
- output_offset + BytePos::from_usize(tail_start),
+ RelativeBytePos::from_usize(tail_start),
lines,
multi_byte_chars,
non_narrow_chars);
@@ -187,13 +176,12 @@ cfg_if::cfg_if! {
// The target (or compiler version) does not support SSE2 ...
fn analyze_source_file_dispatch(src: &str,
- source_file_start_pos: BytePos,
- lines: &mut Vec<BytePos>,
+ lines: &mut Vec<RelativeBytePos>,
multi_byte_chars: &mut Vec<MultiByteChar>,
non_narrow_chars: &mut Vec<NonNarrowChar>) {
analyze_source_file_generic(src,
src.len(),
- source_file_start_pos,
+ RelativeBytePos::from_u32(0),
lines,
multi_byte_chars,
non_narrow_chars);
@@ -207,8 +195,8 @@ cfg_if::cfg_if! {
fn analyze_source_file_generic(
src: &str,
scan_len: usize,
- output_offset: BytePos,
- lines: &mut Vec<BytePos>,
+ output_offset: RelativeBytePos,
+ lines: &mut Vec<RelativeBytePos>,
multi_byte_chars: &mut Vec<MultiByteChar>,
non_narrow_chars: &mut Vec<NonNarrowChar>,
) -> usize {
@@ -230,11 +218,11 @@ fn analyze_source_file_generic(
// This is an ASCII control character, it could be one of the cases
// that are interesting to us.
- let pos = BytePos::from_usize(i) + output_offset;
+ let pos = RelativeBytePos::from_usize(i) + output_offset;
match byte {
b'\n' => {
- lines.push(pos + BytePos(1));
+ lines.push(pos + RelativeBytePos(1));
}
b'\t' => {
non_narrow_chars.push(NonNarrowChar::Tab(pos));
@@ -250,7 +238,7 @@ fn analyze_source_file_generic(
let c = src[i..].chars().next().unwrap();
char_len = c.len_utf8();
- let pos = BytePos::from_usize(i) + output_offset;
+ let pos = RelativeBytePos::from_usize(i) + output_offset;
if char_len > 1 {
assert!((2..=4).contains(&char_len));
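
With this change `analyze_source_file` records line starts relative to the beginning of the file (the first line is always at offset 0) instead of offsetting them by `source_file_start_pos`; absolute positions are recovered later by adding the file's `start_pos`. A compact sketch of the generic (non-SIMD) scan, using plain `u32` offsets as stand-ins for `RelativeBytePos`/`BytePos`:

fn relative_line_starts(src: &str) -> Vec<u32> {
    let mut lines = vec![0u32];
    for (i, b) in src.bytes().enumerate() {
        if b == b'\n' {
            lines.push(i as u32 + 1); // a new line starts right after the '\n'
        }
    }
    // Drop a trailing entry that points one past the end of the file.
    if lines.last() == Some(&(src.len() as u32)) {
        lines.pop();
    }
    lines
}

fn main() {
    let src = "a\nc";
    assert_eq!(relative_line_starts(src), vec![0, 2]); // matches the `newlines_short` test below
    let start_pos = 1_000u32; // hypothetical absolute start of this file in the SourceMap
    let absolute: Vec<u32> = relative_line_starts(src).iter().map(|p| p + start_pos).collect();
    assert_eq!(absolute, vec![1_000, 1_002]);
}
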
diff --git a/compiler/rustc_span/src/analyze_source_file/tests.rs b/compiler/rustc_span/src/analyze_source_file/tests.rs
index 66aefc9a7..0c77d080c 100644
--- a/compiler/rustc_span/src/analyze_source_file/tests.rs
+++ b/compiler/rustc_span/src/analyze_source_file/tests.rs
@@ -3,29 +3,28 @@ use super::*;
macro_rules! test {
(case: $test_name:ident,
text: $text:expr,
- source_file_start_pos: $source_file_start_pos:expr,
lines: $lines:expr,
multi_byte_chars: $multi_byte_chars:expr,
non_narrow_chars: $non_narrow_chars:expr,) => {
#[test]
fn $test_name() {
- let (lines, multi_byte_chars, non_narrow_chars) =
- analyze_source_file($text, BytePos($source_file_start_pos));
+ let (lines, multi_byte_chars, non_narrow_chars) = analyze_source_file($text);
- let expected_lines: Vec<BytePos> = $lines.into_iter().map(BytePos).collect();
+ let expected_lines: Vec<RelativeBytePos> =
+ $lines.into_iter().map(RelativeBytePos).collect();
assert_eq!(lines, expected_lines);
let expected_mbcs: Vec<MultiByteChar> = $multi_byte_chars
.into_iter()
- .map(|(pos, bytes)| MultiByteChar { pos: BytePos(pos), bytes })
+ .map(|(pos, bytes)| MultiByteChar { pos: RelativeBytePos(pos), bytes })
.collect();
assert_eq!(multi_byte_chars, expected_mbcs);
let expected_nncs: Vec<NonNarrowChar> = $non_narrow_chars
.into_iter()
- .map(|(pos, width)| NonNarrowChar::new(BytePos(pos), width))
+ .map(|(pos, width)| NonNarrowChar::new(RelativeBytePos(pos), width))
.collect();
assert_eq!(non_narrow_chars, expected_nncs);
@@ -36,7 +35,6 @@ macro_rules! test {
test!(
case: empty_text,
text: "",
- source_file_start_pos: 0,
lines: vec![],
multi_byte_chars: vec![],
non_narrow_chars: vec![],
@@ -45,7 +43,6 @@ test!(
test!(
case: newlines_short,
text: "a\nc",
- source_file_start_pos: 0,
lines: vec![0, 2],
multi_byte_chars: vec![],
non_narrow_chars: vec![],
@@ -54,7 +51,6 @@ test!(
test!(
case: newlines_long,
text: "012345678\nabcdef012345678\na",
- source_file_start_pos: 0,
lines: vec![0, 10, 26],
multi_byte_chars: vec![],
non_narrow_chars: vec![],
@@ -63,7 +59,6 @@ test!(
test!(
case: newline_and_multi_byte_char_in_same_chunk,
text: "01234β789\nbcdef0123456789abcdef",
- source_file_start_pos: 0,
lines: vec![0, 11],
multi_byte_chars: vec![(5, 2)],
non_narrow_chars: vec![],
@@ -72,7 +67,6 @@ test!(
test!(
case: newline_and_control_char_in_same_chunk,
text: "01234\u{07}6789\nbcdef0123456789abcdef",
- source_file_start_pos: 0,
lines: vec![0, 11],
multi_byte_chars: vec![],
non_narrow_chars: vec![(5, 0)],
@@ -81,7 +75,6 @@ test!(
test!(
case: multi_byte_char_short,
text: "aβc",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![(1, 2)],
non_narrow_chars: vec![],
@@ -90,7 +83,6 @@ test!(
test!(
case: multi_byte_char_long,
text: "0123456789abcΔf012345β",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![(13, 2), (22, 2)],
non_narrow_chars: vec![],
@@ -99,7 +91,6 @@ test!(
test!(
case: multi_byte_char_across_chunk_boundary,
text: "0123456789abcdeΔ123456789abcdef01234",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![(15, 2)],
non_narrow_chars: vec![],
@@ -108,7 +99,6 @@ test!(
test!(
case: multi_byte_char_across_chunk_boundary_tail,
text: "0123456789abcdeΔ....",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![(15, 2)],
non_narrow_chars: vec![],
@@ -117,7 +107,6 @@ test!(
test!(
case: non_narrow_short,
text: "0\t2",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![],
non_narrow_chars: vec![(1, 4)],
@@ -126,7 +115,6 @@ test!(
test!(
case: non_narrow_long,
text: "01\t3456789abcdef01234567\u{07}9",
- source_file_start_pos: 0,
lines: vec![0],
multi_byte_chars: vec![],
non_narrow_chars: vec![(2, 4), (24, 0)],
@@ -135,8 +123,7 @@ test!(
test!(
case: output_offset_all,
text: "01\t345\n789abcΔf01234567\u{07}9\nbcΔf",
- source_file_start_pos: 1000,
- lines: vec![0 + 1000, 7 + 1000, 27 + 1000],
- multi_byte_chars: vec![(13 + 1000, 2), (29 + 1000, 2)],
- non_narrow_chars: vec![(2 + 1000, 4), (24 + 1000, 0)],
+ lines: vec![0, 7, 27],
+ multi_byte_chars: vec![(13, 2), (29, 2)],
+ non_narrow_chars: vec![(2, 4), (24, 0)],
);
diff --git a/compiler/rustc_span/src/caching_source_map_view.rs b/compiler/rustc_span/src/caching_source_map_view.rs
index 886112769..fbfc5c22f 100644
--- a/compiler/rustc_span/src/caching_source_map_view.rs
+++ b/compiler/rustc_span/src/caching_source_map_view.rs
@@ -1,5 +1,5 @@
use crate::source_map::SourceMap;
-use crate::{BytePos, SourceFile, SpanData};
+use crate::{BytePos, Pos, RelativeBytePos, SourceFile, SpanData};
use rustc_data_structures::sync::Lrc;
use std::ops::Range;
@@ -37,6 +37,7 @@ impl CacheEntry {
self.file_index = file_idx;
}
+ let pos = self.file.relative_position(pos);
let line_index = self.file.lookup_line(pos).unwrap();
let line_bounds = self.file.line_bounds(line_index);
self.line_number = line_index + 1;
@@ -79,7 +80,7 @@ impl<'sm> CachingSourceMapView<'sm> {
pub fn byte_pos_to_line_and_col(
&mut self,
pos: BytePos,
- ) -> Option<(Lrc<SourceFile>, usize, BytePos)> {
+ ) -> Option<(Lrc<SourceFile>, usize, RelativeBytePos)> {
self.time_stamp += 1;
// Check if the position is in one of the cached lines
@@ -88,11 +89,8 @@ impl<'sm> CachingSourceMapView<'sm> {
let cache_entry = &mut self.line_cache[cache_idx as usize];
cache_entry.touch(self.time_stamp);
- return Some((
- cache_entry.file.clone(),
- cache_entry.line_number,
- pos - cache_entry.line.start,
- ));
+ let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32());
+ return Some((cache_entry.file.clone(), cache_entry.line_number, col));
}
// No cache hit ...
@@ -108,7 +106,8 @@ impl<'sm> CachingSourceMapView<'sm> {
let cache_entry = &mut self.line_cache[oldest];
cache_entry.update(new_file_and_idx, pos, self.time_stamp);
- Some((cache_entry.file.clone(), cache_entry.line_number, pos - cache_entry.line.start))
+ let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32());
+ Some((cache_entry.file.clone(), cache_entry.line_number, col))
}
pub fn span_data_to_lines_and_cols(
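
Since the line table is now file-relative, `byte_pos_to_line_and_col` first converts the absolute position with `file.relative_position(pos)` and then returns the column as a `RelativeBytePos` offset from the cached line start. A sketch of that lookup over a sorted table of relative line starts, with plain `u32`s standing in for the position types:

fn line_and_col(line_starts: &[u32], file_start: u32, abs_pos: u32) -> (usize, u32) {
    let rel = abs_pos - file_start; // file.relative_position(pos)
    let line_index = line_starts.partition_point(|&s| s <= rel) - 1; // lookup_line
    let col = rel - line_starts[line_index]; // pos - line.start, as a relative offset
    (line_index + 1, col) // 1-based line number
}

fn main() {
    let line_starts = [0, 2, 10]; // relative starts of three lines
    let file_start = 500; // hypothetical start_pos of the file
    assert_eq!(line_and_col(&line_starts, file_start, 503), (2, 1));
}
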
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index 9f2ff4378..88081700c 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -34,11 +34,13 @@ use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::HashingControls;
use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
-use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_data_structures::sync::{Lock, Lrc, WorkerLocal};
use rustc_data_structures::unhash::UnhashMap;
use rustc_index::IndexVec;
use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::cell::RefCell;
+use std::collections::hash_map::Entry;
use std::fmt;
use std::hash::Hash;
@@ -1241,13 +1243,25 @@ impl HygieneEncodeContext {
#[derive(Default)]
/// Additional information used to assist in decoding hygiene data
-pub struct HygieneDecodeContext {
+struct HygieneDecodeContextInner {
// Maps serialized `SyntaxContext` ids to a `SyntaxContext` in the current
// global `HygieneData`. When we deserialize a `SyntaxContext`, we need to create
// a new id in the global `HygieneData`. This map tracks the ID we end up picking,
// so that multiple occurrences of the same serialized id are decoded to the same
- // `SyntaxContext`
- remapped_ctxts: Lock<Vec<Option<SyntaxContext>>>,
+ // `SyntaxContext`. This only stores `SyntaxContext`s that are completely decoded.
+ remapped_ctxts: Vec<Option<SyntaxContext>>,
+
+ /// Maps serialized `SyntaxContext` ids that are currently being decoded to a `SyntaxContext`.
+ decoding: FxHashMap<u32, SyntaxContext>,
+}
+
+#[derive(Default)]
+/// Additional information used to assist in decoding hygiene data
+pub struct HygieneDecodeContext {
+ inner: Lock<HygieneDecodeContextInner>,
+
+ /// A set of serialized `SyntaxContext` ids that are currently being decoded on each thread.
+ local_in_progress: WorkerLocal<RefCell<FxHashMap<u32, ()>>>,
}
/// Register an expansion which has been decoded from the on-disk-cache for the local crate.
@@ -1277,11 +1291,11 @@ pub fn register_expn_id(
let expn_id = ExpnId { krate, local_id };
HygieneData::with(|hygiene_data| {
let _old_data = hygiene_data.foreign_expn_data.insert(expn_id, data);
- debug_assert!(_old_data.is_none());
+ debug_assert!(_old_data.is_none() || cfg!(parallel_compiler));
let _old_hash = hygiene_data.foreign_expn_hashes.insert(expn_id, hash);
- debug_assert!(_old_hash.is_none());
+ debug_assert!(_old_hash.is_none() || _old_hash == Some(hash));
let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id);
- debug_assert!(_old_id.is_none());
+ debug_assert!(_old_id.is_none() || _old_id == Some(expn_id));
});
expn_id
}
@@ -1331,38 +1345,56 @@ pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContext
return SyntaxContext::root();
}
- let outer_ctxts = &context.remapped_ctxts;
+ let ctxt = {
+ let mut inner = context.inner.lock();
- // Ensure that the lock() temporary is dropped early
- {
- if let Some(ctxt) = outer_ctxts.lock().get(raw_id as usize).copied().flatten() {
+ if let Some(ctxt) = inner.remapped_ctxts.get(raw_id as usize).copied().flatten() {
+ // This has already been decoded.
return ctxt;
}
- }
- // Allocate and store SyntaxContext id *before* calling the decoder function,
- // as the SyntaxContextData may reference itself.
- let new_ctxt = HygieneData::with(|hygiene_data| {
- let new_ctxt = SyntaxContext(hygiene_data.syntax_context_data.len() as u32);
- // Push a dummy SyntaxContextData to ensure that nobody else can get the
- // same ID as us. This will be overwritten after call `decode_Data`
- hygiene_data.syntax_context_data.push(SyntaxContextData {
- outer_expn: ExpnId::root(),
- outer_transparency: Transparency::Transparent,
- parent: SyntaxContext::root(),
- opaque: SyntaxContext::root(),
- opaque_and_semitransparent: SyntaxContext::root(),
- dollar_crate_name: kw::Empty,
- });
- let mut ctxts = outer_ctxts.lock();
- let new_len = raw_id as usize + 1;
- if ctxts.len() < new_len {
- ctxts.resize(new_len, None);
+ match inner.decoding.entry(raw_id) {
+ Entry::Occupied(ctxt_entry) => {
+ match context.local_in_progress.borrow_mut().entry(raw_id) {
+ Entry::Occupied(..) => {
+ // We're decoding this already on the current thread. Return here
+ // and let the function higher up the stack finish decoding to handle
+ // recursive cases.
+ return *ctxt_entry.get();
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(());
+
+ // Some other thread is currently decoding this. Race with it.
+ *ctxt_entry.get()
+ }
+ }
+ }
+ Entry::Vacant(entry) => {
+ // We are the first thread to start decoding. Mark the current thread as being in progress.
+ context.local_in_progress.borrow_mut().insert(raw_id, ());
+
+ // Allocate and store SyntaxContext id *before* calling the decoder function,
+ // as the SyntaxContextData may reference itself.
+ let new_ctxt = HygieneData::with(|hygiene_data| {
+ let new_ctxt = SyntaxContext(hygiene_data.syntax_context_data.len() as u32);
+ // Push a dummy SyntaxContextData to ensure that nobody else can get the
+ // same ID as us. This will be overwritten after calling `decode_data`.
+ hygiene_data.syntax_context_data.push(SyntaxContextData {
+ outer_expn: ExpnId::root(),
+ outer_transparency: Transparency::Transparent,
+ parent: SyntaxContext::root(),
+ opaque: SyntaxContext::root(),
+ opaque_and_semitransparent: SyntaxContext::root(),
+ dollar_crate_name: kw::Empty,
+ });
+ new_ctxt
+ });
+ entry.insert(new_ctxt);
+ new_ctxt
+ }
}
- ctxts[raw_id as usize] = Some(new_ctxt);
- drop(ctxts);
- new_ctxt
- });
+ };
// Don't try to decode data while holding the lock, since we need to
// be able to recursively decode a SyntaxContext
@@ -1375,14 +1407,32 @@ pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContext
// Overwrite the dummy data with our decoded SyntaxContextData
HygieneData::with(|hygiene_data| {
let dummy = std::mem::replace(
- &mut hygiene_data.syntax_context_data[new_ctxt.as_u32() as usize],
+ &mut hygiene_data.syntax_context_data[ctxt.as_u32() as usize],
ctxt_data,
);
- // Make sure nothing weird happening while `decode_data` was running
- assert_eq!(dummy.dollar_crate_name, kw::Empty);
+ if cfg!(not(parallel_compiler)) {
+ // Make sure nothing weird happened while `decode_data` was running.
+ // We used `kw::Empty` for the dummy value and we expect nothing to be
+ // modifying the dummy entry.
+ // This does not hold for the parallel compiler as another thread may
+ // have inserted the fully decoded data.
+ assert_eq!(dummy.dollar_crate_name, kw::Empty);
+ }
});
- new_ctxt
+ // Mark the context as completed
+
+ context.local_in_progress.borrow_mut().remove(&raw_id);
+
+ let mut inner = context.inner.lock();
+ let new_len = raw_id as usize + 1;
+ if inner.remapped_ctxts.len() < new_len {
+ inner.remapped_ctxts.resize(new_len, None);
+ }
+ inner.remapped_ctxts[raw_id as usize] = Some(ctxt);
+ inner.decoding.remove(&raw_id);
+
+ ctxt
}
fn for_all_ctxts_in<F: FnMut(u32, SyntaxContext, &SyntaxContextData)>(
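
The rewritten `decode_syntax_context` above reserves a placeholder id under a lock before decoding the payload, tracks in-flight ids in the shared `decoding` map, and uses the per-thread `local_in_progress` set so a recursive reference on the same thread returns the placeholder while a second thread simply races to fill the same slot. The sketch below mirrors that reserve-then-fill protocol in miniature; the types and the `raw_id + 1_000` allocation are stand-ins, not the real `SyntaxContext` machinery:

use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::sync::Mutex;

#[derive(Default)]
struct DecodeContext {
    remapped: Mutex<HashMap<u32, u32>>, // serialized id -> fully decoded id
    decoding: Mutex<HashMap<u32, u32>>, // serialized id -> reserved placeholder id
}

thread_local! {
    // Ids this thread is currently decoding; lets recursive references bail out early.
    static LOCAL_IN_PROGRESS: RefCell<HashSet<u32>> = RefCell::new(HashSet::new());
}

fn decode(ctx: &DecodeContext, raw_id: u32, parent_of: &dyn Fn(u32) -> Option<u32>) -> u32 {
    if let Some(&done) = ctx.remapped.lock().unwrap().get(&raw_id) {
        return done; // already fully decoded
    }
    let reserved = {
        let mut decoding = ctx.decoding.lock().unwrap();
        if let Some(&placeholder) = decoding.get(&raw_id) {
            if LOCAL_IN_PROGRESS.with(|s| s.borrow().contains(&raw_id)) {
                // A call further up this thread's stack will finish decoding it.
                return placeholder;
            }
            // Another thread started first: race with it, reusing its placeholder.
            LOCAL_IN_PROGRESS.with(|s| s.borrow_mut().insert(raw_id));
            placeholder
        } else {
            // First to see this id: reserve a slot *before* decoding the payload,
            // because the payload may refer back to `raw_id` itself.
            LOCAL_IN_PROGRESS.with(|s| s.borrow_mut().insert(raw_id));
            let fresh = raw_id + 1_000; // stand-in for allocating a new SyntaxContext
            decoding.insert(raw_id, fresh);
            fresh
        }
    };

    // Decode the payload outside the lock; this may recurse into `decode`.
    if let Some(parent) = parent_of(raw_id) {
        let _ = decode(ctx, parent, parent_of);
    }

    // Mark the id as completed.
    LOCAL_IN_PROGRESS.with(|s| s.borrow_mut().remove(&raw_id));
    ctx.remapped.lock().unwrap().insert(raw_id, reserved);
    ctx.decoding.lock().unwrap().remove(&raw_id);
    reserved
}

fn main() {
    let ctx = DecodeContext::default();
    // 1's parent is 2 and 2's parent is 1; LOCAL_IN_PROGRESS breaks the cycle.
    let parent_of = |id: u32| match id {
        1 => Some(2),
        2 => Some(1),
        _ => None,
    };
    assert_eq!(decode(&ctx, 1, &parent_of), 1_001);
}
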
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index c24b8d9ec..772e09291 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -21,9 +21,11 @@
#![feature(rustc_attrs)]
#![feature(let_chains)]
#![feature(round_char_boundary)]
+#![feature(read_buf)]
+#![feature(new_uninit)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate rustc_macros;
@@ -31,7 +33,7 @@ extern crate rustc_macros;
#[macro_use]
extern crate tracing;
-use rustc_data_structures::AtomicRef;
+use rustc_data_structures::{outline, AtomicRef};
use rustc_macros::HashStable_Generic;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
@@ -62,7 +64,7 @@ pub mod fatal_error;
pub mod profiling;
use rustc_data_structures::stable_hasher::{Hash128, Hash64, HashStable, StableHasher};
-use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_data_structures::sync::{FreezeLock, FreezeWriteGuard, Lock, Lrc};
use std::borrow::Cow;
use std::cmp::{self, Ordering};
@@ -508,10 +510,6 @@ impl SpanData {
pub fn is_dummy(self) -> bool {
self.lo.0 == 0 && self.hi.0 == 0
}
- #[inline]
- pub fn is_visible(self, sm: &SourceMap) -> bool {
- !self.is_dummy() && sm.is_span_accessible(self.span())
- }
/// Returns `true` if `self` fully encloses `other`.
pub fn contains(self, other: Self) -> bool {
self.lo <= other.lo && other.hi <= self.hi
@@ -571,15 +569,9 @@ impl Span {
self.data().with_parent(ctxt)
}
- /// Returns `true` if this is a dummy span with any hygienic context.
- #[inline]
- pub fn is_dummy(self) -> bool {
- self.data_untracked().is_dummy()
- }
-
#[inline]
pub fn is_visible(self, sm: &SourceMap) -> bool {
- self.data_untracked().is_visible(sm)
+ !self.is_dummy() && sm.is_span_accessible(self)
}
/// Returns `true` if this span comes from any kind of macro, desugaring or inlining.
@@ -1105,27 +1097,27 @@ impl fmt::Debug for SpanData {
}
/// Identifies an offset of a multi-byte character in a `SourceFile`.
-#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug, HashStable_Generic)]
pub struct MultiByteChar {
- /// The absolute offset of the character in the `SourceMap`.
- pub pos: BytePos,
+ /// The relative offset of the character in the `SourceFile`.
+ pub pos: RelativeBytePos,
/// The number of bytes, `>= 2`.
pub bytes: u8,
}
/// Identifies an offset of a non-narrow character in a `SourceFile`.
-#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug, HashStable_Generic)]
pub enum NonNarrowChar {
/// Represents a zero-width character.
- ZeroWidth(BytePos),
+ ZeroWidth(RelativeBytePos),
/// Represents a wide (full-width) character.
- Wide(BytePos),
+ Wide(RelativeBytePos),
/// Represents a tab character, represented visually with a width of 4 characters.
- Tab(BytePos),
+ Tab(RelativeBytePos),
}
impl NonNarrowChar {
- fn new(pos: BytePos, width: usize) -> Self {
+ fn new(pos: RelativeBytePos, width: usize) -> Self {
match width {
0 => NonNarrowChar::ZeroWidth(pos),
2 => NonNarrowChar::Wide(pos),
@@ -1134,8 +1126,8 @@ impl NonNarrowChar {
}
}
- /// Returns the absolute offset of the character in the `SourceMap`.
- pub fn pos(&self) -> BytePos {
+ /// Returns the relative offset of the character in the `SourceFile`.
+ pub fn pos(&self) -> RelativeBytePos {
match *self {
NonNarrowChar::ZeroWidth(p) | NonNarrowChar::Wide(p) | NonNarrowChar::Tab(p) => p,
}
@@ -1151,10 +1143,10 @@ impl NonNarrowChar {
}
}
-impl Add<BytePos> for NonNarrowChar {
+impl Add<RelativeBytePos> for NonNarrowChar {
type Output = Self;
- fn add(self, rhs: BytePos) -> Self {
+ fn add(self, rhs: RelativeBytePos) -> Self {
match self {
NonNarrowChar::ZeroWidth(pos) => NonNarrowChar::ZeroWidth(pos + rhs),
NonNarrowChar::Wide(pos) => NonNarrowChar::Wide(pos + rhs),
@@ -1163,10 +1155,10 @@ impl Add<BytePos> for NonNarrowChar {
}
}
-impl Sub<BytePos> for NonNarrowChar {
+impl Sub<RelativeBytePos> for NonNarrowChar {
type Output = Self;
- fn sub(self, rhs: BytePos) -> Self {
+ fn sub(self, rhs: RelativeBytePos) -> Self {
match self {
NonNarrowChar::ZeroWidth(pos) => NonNarrowChar::ZeroWidth(pos - rhs),
NonNarrowChar::Wide(pos) => NonNarrowChar::Wide(pos - rhs),
@@ -1176,10 +1168,10 @@ impl Sub<BytePos> for NonNarrowChar {
}
/// Identifies an offset of a character that was normalized away from `SourceFile`.
-#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug, HashStable_Generic)]
pub struct NormalizedPos {
- /// The absolute offset of the character in the `SourceMap`.
- pub pos: BytePos,
+ /// The relative offset of the character in the `SourceFile`.
+ pub pos: RelativeBytePos,
/// The difference between original and normalized string at position.
pub diff: u32,
}
@@ -1204,7 +1196,6 @@ pub enum ExternalSourceKind {
AbsentOk,
/// A failed attempt has been made to load the external source.
AbsentErr,
- Unneeded,
}
impl ExternalSource {
@@ -1291,7 +1282,7 @@ impl SourceFileHash {
#[derive(Clone)]
pub enum SourceFileLines {
/// The source file lines, in decoded (random-access) form.
- Lines(Vec<BytePos>),
+ Lines(Vec<RelativeBytePos>),
/// The source file lines, in undecoded difference list form.
Diffs(SourceFileDiffs),
@@ -1312,11 +1303,6 @@ impl SourceFileLines {
/// small crates where very little of `std`'s metadata is used.
#[derive(Clone)]
pub struct SourceFileDiffs {
- /// Position of the first line. Note that this is always encoded as a
- /// `BytePos` because it is often much larger than any of the
- /// differences.
- line_start: BytePos,
-
/// Always 1, 2, or 4. Always as small as possible, while being big
/// enough to hold the length of the longest line in the source file.
/// The 1 case is by far the most common.
@@ -1346,13 +1332,13 @@ pub struct SourceFile {
pub src_hash: SourceFileHash,
/// The external source code (used for external crates, which will have a `None`
/// value as `self.src`).
- pub external_src: Lock<ExternalSource>,
+ pub external_src: FreezeLock<ExternalSource>,
/// The start position of this source in the `SourceMap`.
pub start_pos: BytePos,
- /// The end position of this source in the `SourceMap`.
- pub end_pos: BytePos,
+ /// The byte length of this source.
+ pub source_len: RelativeBytePos,
/// Locations of lines beginnings in the source code.
- pub lines: Lock<SourceFileLines>,
+ pub lines: FreezeLock<SourceFileLines>,
/// Locations of multi-byte characters in the source code.
pub multibyte_chars: Vec<MultiByteChar>,
/// Width of characters that are not narrow in the source code.
@@ -1371,10 +1357,10 @@ impl Clone for SourceFile {
name: self.name.clone(),
src: self.src.clone(),
src_hash: self.src_hash,
- external_src: Lock::new(self.external_src.borrow().clone()),
+ external_src: self.external_src.clone(),
start_pos: self.start_pos,
- end_pos: self.end_pos,
- lines: Lock::new(self.lines.borrow().clone()),
+ source_len: self.source_len,
+ lines: self.lines.clone(),
multibyte_chars: self.multibyte_chars.clone(),
non_narrow_chars: self.non_narrow_chars.clone(),
normalized_pos: self.normalized_pos.clone(),
@@ -1388,68 +1374,67 @@ impl<S: Encoder> Encodable<S> for SourceFile {
fn encode(&self, s: &mut S) {
self.name.encode(s);
self.src_hash.encode(s);
- self.start_pos.encode(s);
- self.end_pos.encode(s);
+ // Do not encode `start_pos` as it's global state for this session.
+ self.source_len.encode(s);
// We are always in `Lines` form by the time we reach here.
- assert!(self.lines.borrow().is_lines());
- self.lines(|lines| {
- // Store the length.
- s.emit_u32(lines.len() as u32);
-
- // Compute and store the difference list.
- if lines.len() != 0 {
- let max_line_length = if lines.len() == 1 {
- 0
- } else {
- lines
- .array_windows()
- .map(|&[fst, snd]| snd - fst)
- .map(|bp| bp.to_usize())
- .max()
- .unwrap()
- };
-
- let bytes_per_diff: usize = match max_line_length {
- 0..=0xFF => 1,
- 0x100..=0xFFFF => 2,
- _ => 4,
- };
-
- // Encode the number of bytes used per diff.
- s.emit_u8(bytes_per_diff as u8);
-
- // Encode the first element.
- lines[0].encode(s);
-
- // Encode the difference list.
- let diff_iter = lines.array_windows().map(|&[fst, snd]| snd - fst);
- let num_diffs = lines.len() - 1;
- let mut raw_diffs;
- match bytes_per_diff {
- 1 => {
- raw_diffs = Vec::with_capacity(num_diffs);
- for diff in diff_iter {
- raw_diffs.push(diff.0 as u8);
- }
+ assert!(self.lines.read().is_lines());
+ let lines = self.lines();
+ // Store the length.
+ s.emit_u32(lines.len() as u32);
+
+ // Compute and store the difference list.
+ if lines.len() != 0 {
+ let max_line_length = if lines.len() == 1 {
+ 0
+ } else {
+ lines
+ .array_windows()
+ .map(|&[fst, snd]| snd - fst)
+ .map(|bp| bp.to_usize())
+ .max()
+ .unwrap()
+ };
+
+ let bytes_per_diff: usize = match max_line_length {
+ 0..=0xFF => 1,
+ 0x100..=0xFFFF => 2,
+ _ => 4,
+ };
+
+ // Encode the number of bytes used per diff.
+ s.emit_u8(bytes_per_diff as u8);
+
+ // Encode the first element.
+ assert_eq!(lines[0], RelativeBytePos(0));
+
+ // Encode the difference list.
+ let diff_iter = lines.array_windows().map(|&[fst, snd]| snd - fst);
+ let num_diffs = lines.len() - 1;
+ let mut raw_diffs;
+ match bytes_per_diff {
+ 1 => {
+ raw_diffs = Vec::with_capacity(num_diffs);
+ for diff in diff_iter {
+ raw_diffs.push(diff.0 as u8);
}
- 2 => {
- raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
- for diff in diff_iter {
- raw_diffs.extend_from_slice(&(diff.0 as u16).to_le_bytes());
- }
+ }
+ 2 => {
+ raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
+ for diff in diff_iter {
+ raw_diffs.extend_from_slice(&(diff.0 as u16).to_le_bytes());
}
- 4 => {
- raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
- for diff in diff_iter {
- raw_diffs.extend_from_slice(&(diff.0).to_le_bytes());
- }
+ }
+ 4 => {
+ raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
+ for diff in diff_iter {
+ raw_diffs.extend_from_slice(&(diff.0).to_le_bytes());
}
- _ => unreachable!(),
}
- s.emit_raw_bytes(&raw_diffs);
+ _ => unreachable!(),
}
- });
+ s.emit_raw_bytes(&raw_diffs);
+ }
self.multibyte_chars.encode(s);
self.non_narrow_chars.encode(s);
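
The rewritten `Encodable` impl keeps the on-disk scheme intact: the first line start is now always zero (asserted instead of stored), and the remaining starts are written as deltas using 1, 2 or 4 bytes each, whichever width fits the longest line. A rough round-trip sketch of that scheme with plain `u32` offsets in place of `RelativeBytePos` (names invented, not the real serializer):

    /// Encode line-start offsets (first entry must be 0) as (bytes_per_diff, raw_diffs).
    fn encode_lines(lines: &[u32]) -> (u8, Vec<u8>) {
        assert_eq!(lines.first().copied(), Some(0));
        let diffs: Vec<u32> = lines.windows(2).map(|w| w[1] - w[0]).collect();
        let max = diffs.iter().copied().max().unwrap_or(0);
        let bytes_per_diff: u8 = match max {
            0..=0xFF => 1,
            0x100..=0xFFFF => 2,
            _ => 4,
        };
        let mut raw = Vec::with_capacity(bytes_per_diff as usize * diffs.len());
        for d in diffs {
            // Each diff fits in `bytes_per_diff` bytes by construction, so the low
            // little-endian bytes are enough.
            raw.extend_from_slice(&d.to_le_bytes()[..bytes_per_diff as usize]);
        }
        (bytes_per_diff, raw)
    }

    /// Decode back to absolute line-start offsets.
    fn decode_lines(num_lines: usize, bytes_per_diff: u8, raw: &[u8]) -> Vec<u32> {
        let mut lines = Vec::with_capacity(num_lines);
        let mut start = 0u32;
        lines.push(start);
        for chunk in raw.chunks_exact(bytes_per_diff as usize) {
            let mut buf = [0u8; 4];
            buf[..chunk.len()].copy_from_slice(chunk);
            start += u32::from_le_bytes(buf);
            lines.push(start);
        }
        lines
    }

    fn main() {
        // Three lines starting at bytes 0, 14 and 25: the longest line is 14 bytes,
        // so one byte per diff is enough.
        let lines = vec![0, 14, 25];
        let (bpd, raw) = encode_lines(&lines);
        assert_eq!((bpd, raw.as_slice()), (1, &[14u8, 11][..]));
        assert_eq!(decode_lines(lines.len(), bpd, &raw), lines);
    }
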
@@ -1463,26 +1448,17 @@ impl<D: Decoder> Decodable<D> for SourceFile {
fn decode(d: &mut D) -> SourceFile {
let name: FileName = Decodable::decode(d);
let src_hash: SourceFileHash = Decodable::decode(d);
- let start_pos: BytePos = Decodable::decode(d);
- let end_pos: BytePos = Decodable::decode(d);
+ let source_len: RelativeBytePos = Decodable::decode(d);
let lines = {
let num_lines: u32 = Decodable::decode(d);
if num_lines > 0 {
// Read the number of bytes used per diff.
let bytes_per_diff = d.read_u8() as usize;
- // Read the first element.
- let line_start: BytePos = Decodable::decode(d);
-
// Read the difference list.
let num_diffs = num_lines as usize - 1;
let raw_diffs = d.read_raw_bytes(bytes_per_diff * num_diffs).to_vec();
- SourceFileLines::Diffs(SourceFileDiffs {
- line_start,
- bytes_per_diff,
- num_diffs,
- raw_diffs,
- })
+ SourceFileLines::Diffs(SourceFileDiffs { bytes_per_diff, num_diffs, raw_diffs })
} else {
SourceFileLines::Lines(vec![])
}
@@ -1494,14 +1470,14 @@ impl<D: Decoder> Decodable<D> for SourceFile {
let cnum: CrateNum = Decodable::decode(d);
SourceFile {
name,
- start_pos,
- end_pos,
+ start_pos: BytePos::from_u32(0),
+ source_len,
src: None,
src_hash,
// Unused - the metadata decoder will construct
// a new SourceFile, filling in `external_src` properly
- external_src: Lock::new(ExternalSource::Unneeded),
- lines: Lock::new(lines),
+ external_src: FreezeLock::frozen(ExternalSource::Unneeded),
+ lines: FreezeLock::new(lines),
multibyte_chars,
non_narrow_chars,
normalized_pos,
@@ -1521,102 +1497,116 @@ impl SourceFile {
pub fn new(
name: FileName,
mut src: String,
- start_pos: BytePos,
hash_kind: SourceFileHashAlgorithm,
- ) -> Self {
+ ) -> Result<Self, OffsetOverflowError> {
// Compute the file hash before any normalization.
let src_hash = SourceFileHash::new(hash_kind, &src);
- let normalized_pos = normalize_src(&mut src, start_pos);
+ let normalized_pos = normalize_src(&mut src);
let name_hash = {
let mut hasher: StableHasher = StableHasher::new();
name.hash(&mut hasher);
hasher.finish()
};
- let end_pos = start_pos.to_usize() + src.len();
- assert!(end_pos <= u32::MAX as usize);
+ let source_len = src.len();
+ let source_len = u32::try_from(source_len).map_err(|_| OffsetOverflowError)?;
let (lines, multibyte_chars, non_narrow_chars) =
- analyze_source_file::analyze_source_file(&src, start_pos);
+ analyze_source_file::analyze_source_file(&src);
- SourceFile {
+ Ok(SourceFile {
name,
src: Some(Lrc::new(src)),
src_hash,
- external_src: Lock::new(ExternalSource::Unneeded),
- start_pos,
- end_pos: Pos::from_usize(end_pos),
- lines: Lock::new(SourceFileLines::Lines(lines)),
+ external_src: FreezeLock::frozen(ExternalSource::Unneeded),
+ start_pos: BytePos::from_u32(0),
+ source_len: RelativeBytePos::from_u32(source_len),
+ lines: FreezeLock::frozen(SourceFileLines::Lines(lines)),
multibyte_chars,
non_narrow_chars,
normalized_pos,
name_hash,
cnum: LOCAL_CRATE,
- }
+ })
}
- pub fn lines<F, R>(&self, f: F) -> R
- where
- F: FnOnce(&[BytePos]) -> R,
- {
- let mut guard = self.lines.borrow_mut();
- match &*guard {
- SourceFileLines::Lines(lines) => f(lines),
- SourceFileLines::Diffs(SourceFileDiffs {
- mut line_start,
- bytes_per_diff,
- num_diffs,
- raw_diffs,
- }) => {
- // Convert from "diffs" form to "lines" form.
- let num_lines = num_diffs + 1;
- let mut lines = Vec::with_capacity(num_lines);
- lines.push(line_start);
-
- assert_eq!(*num_diffs, raw_diffs.len() / bytes_per_diff);
- match bytes_per_diff {
- 1 => {
- lines.extend(raw_diffs.into_iter().map(|&diff| {
- line_start = line_start + BytePos(diff as u32);
- line_start
- }));
- }
- 2 => {
- lines.extend((0..*num_diffs).map(|i| {
- let pos = bytes_per_diff * i;
- let bytes = [raw_diffs[pos], raw_diffs[pos + 1]];
- let diff = u16::from_le_bytes(bytes);
- line_start = line_start + BytePos(diff as u32);
- line_start
- }));
- }
- 4 => {
- lines.extend((0..*num_diffs).map(|i| {
- let pos = bytes_per_diff * i;
- let bytes = [
- raw_diffs[pos],
- raw_diffs[pos + 1],
- raw_diffs[pos + 2],
- raw_diffs[pos + 3],
- ];
- let diff = u32::from_le_bytes(bytes);
- line_start = line_start + BytePos(diff);
- line_start
- }));
- }
- _ => unreachable!(),
- }
- let res = f(&lines);
- *guard = SourceFileLines::Lines(lines);
- res
+ /// This converts the `lines` field to contain `SourceFileLines::Lines` if needed and freezes it.
+ fn convert_diffs_to_lines_frozen(&self) {
+ let mut guard = if let Some(guard) = self.lines.try_write() { guard } else { return };
+
+ let SourceFileDiffs { bytes_per_diff, num_diffs, raw_diffs } = match &*guard {
+ SourceFileLines::Diffs(diffs) => diffs,
+ SourceFileLines::Lines(..) => {
+ FreezeWriteGuard::freeze(guard);
+ return;
+ }
+ };
+
+ // Convert from "diffs" form to "lines" form.
+ let num_lines = num_diffs + 1;
+ let mut lines = Vec::with_capacity(num_lines);
+ let mut line_start = RelativeBytePos(0);
+ lines.push(line_start);
+
+ assert_eq!(*num_diffs, raw_diffs.len() / bytes_per_diff);
+ match bytes_per_diff {
+ 1 => {
+ lines.extend(raw_diffs.into_iter().map(|&diff| {
+ line_start = line_start + RelativeBytePos(diff as u32);
+ line_start
+ }));
+ }
+ 2 => {
+ lines.extend((0..*num_diffs).map(|i| {
+ let pos = bytes_per_diff * i;
+ let bytes = [raw_diffs[pos], raw_diffs[pos + 1]];
+ let diff = u16::from_le_bytes(bytes);
+ line_start = line_start + RelativeBytePos(diff as u32);
+ line_start
+ }));
}
+ 4 => {
+ lines.extend((0..*num_diffs).map(|i| {
+ let pos = bytes_per_diff * i;
+ let bytes = [
+ raw_diffs[pos],
+ raw_diffs[pos + 1],
+ raw_diffs[pos + 2],
+ raw_diffs[pos + 3],
+ ];
+ let diff = u32::from_le_bytes(bytes);
+ line_start = line_start + RelativeBytePos(diff);
+ line_start
+ }));
+ }
+ _ => unreachable!(),
}
+
+ *guard = SourceFileLines::Lines(lines);
+
+ FreezeWriteGuard::freeze(guard);
+ }
+
+ pub fn lines(&self) -> &[RelativeBytePos] {
+ if let Some(SourceFileLines::Lines(lines)) = self.lines.get() {
+ return &lines[..];
+ }
+
+ outline(|| {
+ self.convert_diffs_to_lines_frozen();
+ if let Some(SourceFileLines::Lines(lines)) = self.lines.get() {
+ return &lines[..];
+ }
+ unreachable!()
+ })
}
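
`lines()` now hands out `&[RelativeBytePos]` directly: the cold path (kept out of line via `outline`) decodes the diff list once under a write guard and then freezes the `FreezeLock`, after which `get()` is a lock-free read. The same decode-once-then-share shape can be sketched with `std::sync::OnceLock`; this is only an analogy, not the `FreezeLock` API:

    use std::sync::OnceLock;

    /// Hypothetical lazily-decoded line table: the compact diff form is kept until
    /// somebody needs random access, and the decoded form is computed at most once.
    struct LazyLines {
        raw_diffs: Vec<u8>,          // one byte per diff in this toy version
        decoded: OnceLock<Vec<u32>>, // effectively frozen after the first decode
    }

    impl LazyLines {
        fn lines(&self) -> &[u32] {
            self.decoded.get_or_init(|| {
                let mut start = 0u32;
                let mut lines = vec![start];
                for &d in &self.raw_diffs {
                    start += d as u32;
                    lines.push(start);
                }
                lines
            })
        }
    }

    fn main() {
        let table = LazyLines { raw_diffs: vec![14, 11], decoded: OnceLock::new() };
        // The first call decodes; later calls are plain reads of the shared value.
        assert_eq!(table.lines(), &[0, 14, 25]);
        assert_eq!(table.lines(), &[0, 14, 25]);
    }
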
/// Returns the `BytePos` of the beginning of the current line.
pub fn line_begin_pos(&self, pos: BytePos) -> BytePos {
+ let pos = self.relative_position(pos);
let line_index = self.lookup_line(pos).unwrap();
- self.lines(|lines| lines[line_index])
+ let line_start_pos = self.lines()[line_index];
+ self.absolute_position(line_start_pos)
}
/// Add externally loaded source.
@@ -1627,35 +1617,37 @@ impl SourceFile {
where
F: FnOnce() -> Option<String>,
{
- if matches!(
- *self.external_src.borrow(),
- ExternalSource::Foreign { kind: ExternalSourceKind::AbsentOk, .. }
- ) {
+ if !self.external_src.is_frozen() {
let src = get_src();
- let mut external_src = self.external_src.borrow_mut();
- // Check that no-one else have provided the source while we were getting it
- if let ExternalSource::Foreign {
- kind: src_kind @ ExternalSourceKind::AbsentOk, ..
- } = &mut *external_src
- {
- if let Some(mut src) = src {
- // The src_hash needs to be computed on the pre-normalized src.
- if self.src_hash.matches(&src) {
- normalize_src(&mut src, BytePos::from_usize(0));
- *src_kind = ExternalSourceKind::Present(Lrc::new(src));
- return true;
- }
+ let src = src.and_then(|mut src| {
+ // The src_hash needs to be computed on the pre-normalized src.
+ self.src_hash.matches(&src).then(|| {
+ normalize_src(&mut src);
+ src
+ })
+ });
+
+ self.external_src.try_write().map(|mut external_src| {
+ if let ExternalSource::Foreign {
+ kind: src_kind @ ExternalSourceKind::AbsentOk,
+ ..
+ } = &mut *external_src
+ {
+ *src_kind = if let Some(src) = src {
+ ExternalSourceKind::Present(Lrc::new(src))
+ } else {
+ ExternalSourceKind::AbsentErr
+ };
} else {
- *src_kind = ExternalSourceKind::AbsentErr;
+ panic!("unexpected state {:?}", *external_src)
}
- false
- } else {
- self.src.is_some() || external_src.get_source().is_some()
- }
- } else {
- self.src.is_some() || self.external_src.borrow().get_source().is_some()
+ // Freeze this so we don't try to load the source again.
+ FreezeWriteGuard::freeze(external_src)
+ });
}
+
+ self.src.is_some() || self.external_src.read().get_source().is_some()
}
/// Gets a line from the list of pre-computed line-beginnings.
@@ -1673,9 +1665,8 @@ impl SourceFile {
}
let begin = {
- let line = self.lines(|lines| lines.get(line_number).copied())?;
- let begin: BytePos = line - self.start_pos;
- begin.to_usize()
+ let line = self.lines().get(line_number).copied()?;
+ line.to_usize()
};
if let Some(ref src) = self.src {
@@ -1698,30 +1689,44 @@ impl SourceFile {
}
pub fn count_lines(&self) -> usize {
- self.lines(|lines| lines.len())
+ self.lines().len()
+ }
+
+ #[inline]
+ pub fn absolute_position(&self, pos: RelativeBytePos) -> BytePos {
+ BytePos::from_u32(pos.to_u32() + self.start_pos.to_u32())
+ }
+
+ #[inline]
+ pub fn relative_position(&self, pos: BytePos) -> RelativeBytePos {
+ RelativeBytePos::from_u32(pos.to_u32() - self.start_pos.to_u32())
+ }
+
+ #[inline]
+ pub fn end_position(&self) -> BytePos {
+ self.absolute_position(self.source_len)
}
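
With `end_pos` gone, a `SourceFile` carries only its length plus a session-assigned `start_pos`, and these helpers translate between map-global `BytePos` and file-local `RelativeBytePos`. A quick worked example of the arithmetic with bare `u32`s and made-up values:

    // Suppose a file is registered at start_pos = 100 and is 30 bytes long.
    fn main() {
        let start_pos: u32 = 100;
        let source_len: u32 = 30;

        let absolute = |rel: u32| rel + start_pos; // cf. SourceFile::absolute_position
        let relative = |abs: u32| abs - start_pos; // cf. SourceFile::relative_position
        let end_position = absolute(source_len);   // cf. SourceFile::end_position

        assert_eq!(relative(117), 17); // byte 117 of the map is byte 17 of this file
        assert_eq!(absolute(17), 117);
        assert_eq!(end_position, 130); // one past the last byte of this file
    }
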
/// Finds the line containing the given position. The return value is the
/// index into the `lines` array of this `SourceFile`, not the 1-based line
/// number. If the source_file is empty or the position is located before the
/// first line, `None` is returned.
- pub fn lookup_line(&self, pos: BytePos) -> Option<usize> {
- self.lines(|lines| lines.partition_point(|x| x <= &pos).checked_sub(1))
+ pub fn lookup_line(&self, pos: RelativeBytePos) -> Option<usize> {
+ self.lines().partition_point(|x| x <= &pos).checked_sub(1)
}
pub fn line_bounds(&self, line_index: usize) -> Range<BytePos> {
if self.is_empty() {
- return self.start_pos..self.end_pos;
+ return self.start_pos..self.start_pos;
}
- self.lines(|lines| {
- assert!(line_index < lines.len());
- if line_index == (lines.len() - 1) {
- lines[line_index]..self.end_pos
- } else {
- lines[line_index]..lines[line_index + 1]
- }
- })
+ let lines = self.lines();
+ assert!(line_index < lines.len());
+ if line_index == (lines.len() - 1) {
+ self.absolute_position(lines[line_index])..self.end_position()
+ } else {
+ self.absolute_position(lines[line_index])..self.absolute_position(lines[line_index + 1])
+ }
}
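
`lookup_line` and `line_bounds` now work off the decoded table directly: `partition_point` counts the line starts that are `<=` the position, and subtracting one gives the containing line, with `None` falling out for an empty table or a position before every recorded start. A tiny sketch with plain integers (same line starts as the `test_lookup_line` test further down):

    fn lookup_line(lines: &[u32], pos: u32) -> Option<usize> {
        lines.partition_point(|&start| start <= pos).checked_sub(1)
    }

    fn main() {
        // Line starts for "abcdefghijklm\nabcdefghij\n..." as relative offsets.
        let lines = [0, 14, 25];
        assert_eq!(lookup_line(&lines, 0), Some(0));  // first byte of line 0
        assert_eq!(lookup_line(&lines, 13), Some(0)); // the '\n' still counts as line 0
        assert_eq!(lookup_line(&lines, 14), Some(1)); // first byte of line 1
        assert_eq!(lookup_line(&lines, 26), Some(2));
        assert_eq!(lookup_line(&[], 5), None);        // empty table
    }
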
/// Returns whether or not the file contains the given `SourceMap` byte
@@ -1730,27 +1735,29 @@ impl SourceFile {
/// returns true still contain one byte position according to this function.
#[inline]
pub fn contains(&self, byte_pos: BytePos) -> bool {
- byte_pos >= self.start_pos && byte_pos <= self.end_pos
+ byte_pos >= self.start_pos && byte_pos <= self.end_position()
}
#[inline]
pub fn is_empty(&self) -> bool {
- self.start_pos == self.end_pos
+ self.source_len.to_u32() == 0
}
/// Calculates the original byte position relative to the start of the file
/// based on the given byte position.
- pub fn original_relative_byte_pos(&self, pos: BytePos) -> BytePos {
+ pub fn original_relative_byte_pos(&self, pos: BytePos) -> RelativeBytePos {
+ let pos = self.relative_position(pos);
+
// Diff before any records is 0. Otherwise use the previously recorded
// diff as that applies to the following characters until a new diff
// is recorded.
let diff = match self.normalized_pos.binary_search_by(|np| np.pos.cmp(&pos)) {
Ok(i) => self.normalized_pos[i].diff,
- Err(i) if i == 0 => 0,
+ Err(0) => 0,
Err(i) => self.normalized_pos[i - 1].diff,
};
- BytePos::from_u32(pos.0 - self.start_pos.0 + diff)
+ RelativeBytePos::from_u32(pos.0 + diff)
}
/// Calculates a normalized byte position from a byte offset relative to the
@@ -1768,15 +1775,15 @@ impl SourceFile {
.binary_search_by(|np| (np.pos.0 + np.diff).cmp(&(self.start_pos.0 + offset)))
{
Ok(i) => self.normalized_pos[i].diff,
- Err(i) if i == 0 => 0,
+ Err(0) => 0,
Err(i) => self.normalized_pos[i - 1].diff,
};
BytePos::from_u32(self.start_pos.0 + offset - diff)
}
- /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
- pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
+ /// Converts a relative `RelativeBytePos` to a `CharPos` relative to the `SourceFile`.
+ fn bytepos_to_file_charpos(&self, bpos: RelativeBytePos) -> CharPos {
// The number of extra bytes due to multibyte chars in the `SourceFile`.
let mut total_extra_bytes = 0;
@@ -1794,18 +1801,18 @@ impl SourceFile {
}
}
- assert!(self.start_pos.to_u32() + total_extra_bytes <= bpos.to_u32());
- CharPos(bpos.to_usize() - self.start_pos.to_usize() - total_extra_bytes as usize)
+ assert!(total_extra_bytes <= bpos.to_u32());
+ CharPos(bpos.to_usize() - total_extra_bytes as usize)
}
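
`bytepos_to_file_charpos` is now purely file-relative: it subtracts from the relative byte offset the extra bytes contributed by every multi-byte character that ends at or before it. A hedged worked example with a hypothetical input whose first character is the two-byte 'é':

    // Hypothetical file contents: "é is not ASCII". The 'é' occupies bytes 0..2,
    // so a MultiByteChar { pos: 0, bytes: 2 } entry contributes one extra byte.
    struct MultiByteChar {
        pos: u32,
        bytes: u8,
    }

    fn char_pos(multibyte_chars: &[MultiByteChar], bpos: u32) -> usize {
        let mut total_extra_bytes = 0u32;
        for mbc in multibyte_chars {
            if mbc.pos < bpos {
                total_extra_bytes += mbc.bytes as u32 - 1;
            } else {
                break; // the list is sorted by position
            }
        }
        assert!(total_extra_bytes <= bpos);
        (bpos - total_extra_bytes) as usize
    }

    fn main() {
        let mbcs = [MultiByteChar { pos: 0, bytes: 2 }];
        assert_eq!(char_pos(&mbcs, 0), 0); // the 'é' itself is character 0
        assert_eq!(char_pos(&mbcs, 2), 1); // the space after it is character 1
        assert_eq!(char_pos(&mbcs, 4), 3); // byte 4 ('s') is character 3
    }
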
/// Looks up the file's (1-based) line number and (0-based `CharPos`) column offset, for a
- /// given `BytePos`.
- pub fn lookup_file_pos(&self, pos: BytePos) -> (usize, CharPos) {
+ /// given `RelativeBytePos`.
+ fn lookup_file_pos(&self, pos: RelativeBytePos) -> (usize, CharPos) {
let chpos = self.bytepos_to_file_charpos(pos);
match self.lookup_line(pos) {
Some(a) => {
let line = a + 1; // Line numbers start at 1
- let linebpos = self.lines(|lines| lines[a]);
+ let linebpos = self.lines()[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
let col = chpos - linechpos;
debug!("byte pos {:?} is on the line at byte pos {:?}", pos, linebpos);
@@ -1821,10 +1828,11 @@ impl SourceFile {
/// Looks up the file's (1-based) line number, (0-based `CharPos`) column offset, and (0-based)
/// column offset when displayed, for a given `BytePos`.
pub fn lookup_file_pos_with_col_display(&self, pos: BytePos) -> (usize, CharPos, usize) {
+ let pos = self.relative_position(pos);
let (line, col_or_chpos) = self.lookup_file_pos(pos);
if line > 0 {
let col = col_or_chpos;
- let linebpos = self.lines(|lines| lines[line - 1]);
+ let linebpos = self.lines()[line - 1];
let col_display = {
let start_width_idx = self
.non_narrow_chars
@@ -1859,16 +1867,10 @@ impl SourceFile {
}
/// Normalizes the source code and records the normalizations.
-fn normalize_src(src: &mut String, start_pos: BytePos) -> Vec<NormalizedPos> {
+fn normalize_src(src: &mut String) -> Vec<NormalizedPos> {
let mut normalized_pos = vec![];
remove_bom(src, &mut normalized_pos);
normalize_newlines(src, &mut normalized_pos);
-
- // Offset all the positions by start_pos to match the final file positions.
- for np in &mut normalized_pos {
- np.pos.0 += start_pos.0;
- }
-
normalized_pos
}
@@ -1876,7 +1878,7 @@ fn normalize_src(src: &mut String, start_pos: BytePos) -> Vec<NormalizedPos> {
fn remove_bom(src: &mut String, normalized_pos: &mut Vec<NormalizedPos>) {
if src.starts_with('\u{feff}') {
src.drain(..3);
- normalized_pos.push(NormalizedPos { pos: BytePos(0), diff: 3 });
+ normalized_pos.push(NormalizedPos { pos: RelativeBytePos(0), diff: 3 });
}
}
@@ -1911,7 +1913,7 @@ fn normalize_newlines(src: &mut String, normalized_pos: &mut Vec<NormalizedPos>)
cursor += idx - gap_len;
gap_len += 1;
normalized_pos.push(NormalizedPos {
- pos: BytePos::from_usize(cursor + 1),
+ pos: RelativeBytePos::from_usize(cursor + 1),
diff: original_gap + gap_len as u32,
});
}
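
Normalization (BOM stripping and "\r\n" to "\n") now records file-relative offsets, since they are no longer shifted by `start_pos` afterwards. A deliberately simplified sketch of the bookkeeping, recording `(pos, cumulative bytes removed)` pairs in the spirit of `NormalizedPos` (not the exact production algorithm):

    /// Returns (relative position, cumulative bytes removed up to that position).
    fn normalize(src: &mut String) -> Vec<(u32, u32)> {
        let mut normalized_pos = Vec::new();
        // Strip a UTF-8 BOM (3 bytes) if present.
        if src.starts_with('\u{feff}') {
            src.drain(..3);
            normalized_pos.push((0, 3));
        }
        // Replace "\r\n" with "\n", recording one extra removed byte per occurrence.
        let mut removed = normalized_pos.last().map_or(0, |&(_, d)| d);
        while let Some(idx) = src.find("\r\n") {
            src.replace_range(idx..idx + 2, "\n");
            removed += 1;
            normalized_pos.push((idx as u32 + 1, removed));
        }
        normalized_pos
    }

    fn main() {
        let mut src = "\u{feff}a\r\nb".to_string();
        let records = normalize(&mut src);
        assert_eq!(src, "a\nb");
        // The BOM removal is recorded at position 0, the dropped '\r' just after the '\n'.
        assert_eq!(records, vec![(0, 3), (2, 4)]);
    }
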
@@ -2013,6 +2015,10 @@ impl_pos! {
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub struct BytePos(pub u32);
+ /// A byte offset relative to file beginning.
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
+ pub struct RelativeBytePos(pub u32);
+
/// A character offset.
///
/// Because of multibyte UTF-8 characters, a byte offset
@@ -2034,6 +2040,24 @@ impl<D: Decoder> Decodable<D> for BytePos {
}
}
+impl<H: HashStableContext> HashStable<H> for RelativeBytePos {
+ fn hash_stable(&self, hcx: &mut H, hasher: &mut StableHasher) {
+ self.0.hash_stable(hcx, hasher);
+ }
+}
+
+impl<S: Encoder> Encodable<S> for RelativeBytePos {
+ fn encode(&self, s: &mut S) {
+ s.emit_u32(self.0);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for RelativeBytePos {
+ fn decode(d: &mut D) -> RelativeBytePos {
+ RelativeBytePos(d.read_u32())
+ }
+}
+
// _____________________________________________________________________________
// Loc, SourceFileAndLine, SourceFileAndBytePos
//
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
index 983b2ab04..0b575c13a 100644
--- a/compiler/rustc_span/src/source_map.rs
+++ b/compiler/rustc_span/src/source_map.rs
@@ -14,16 +14,15 @@ pub use crate::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{Hash128, Hash64, StableHasher};
-use rustc_data_structures::sync::{
- AtomicU32, IntoDynSyncSend, Lrc, MappedReadGuard, ReadGuard, RwLock,
-};
+use rustc_data_structures::sync::{IntoDynSyncSend, Lrc, MappedReadGuard, ReadGuard, RwLock};
use std::cmp;
use std::hash::Hash;
use std::path::{self, Path, PathBuf};
-use std::sync::atomic::Ordering;
use std::fs;
use std::io;
+use std::io::BorrowedBuf;
+use std::io::Read;
#[cfg(test)]
mod tests;
@@ -101,10 +100,13 @@ pub trait FileLoader {
fn file_exists(&self, path: &Path) -> bool;
/// Read the contents of a UTF-8 file into memory.
+ /// This function must return a String because we normalize
+ /// source files, which may require resizing.
fn read_file(&self, path: &Path) -> io::Result<String>;
/// Read the contents of a potentially non-UTF-8 file into memory.
- fn read_binary_file(&self, path: &Path) -> io::Result<Vec<u8>>;
+ /// We don't normalize binary files, so we can start in an Lrc.
+ fn read_binary_file(&self, path: &Path) -> io::Result<Lrc<[u8]>>;
}
/// A FileLoader that uses std::fs to load real files.
@@ -119,8 +121,45 @@ impl FileLoader for RealFileLoader {
fs::read_to_string(path)
}
- fn read_binary_file(&self, path: &Path) -> io::Result<Vec<u8>> {
- fs::read(path)
+ fn read_binary_file(&self, path: &Path) -> io::Result<Lrc<[u8]>> {
+ let mut file = fs::File::open(path)?;
+ let len = file.metadata()?.len();
+
+ let mut bytes = Lrc::new_uninit_slice(len as usize);
+ let mut buf = BorrowedBuf::from(Lrc::get_mut(&mut bytes).unwrap());
+ match file.read_buf_exact(buf.unfilled()) {
+ Ok(()) => {}
+ Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => {
+ drop(bytes);
+ return fs::read(path).map(Vec::into);
+ }
+ Err(e) => return Err(e),
+ }
+ // SAFETY: If the read_buf_exact call returns Ok(()), then we have
+ // read len bytes and initialized the buffer.
+ let bytes = unsafe { bytes.assume_init() };
+
+ // At this point, we've read all the bytes that filesystem metadata reported exist.
+ // But we are not guaranteed to be at the end of the file, because we did not attempt to do
+ // a read with a non-zero-sized buffer and get Ok(0).
+ // So we do a small read into a fixed-size buffer. If the read returns no bytes then we're
+ // already done, and we just return the Lrc we built above.
+ // If the read returns bytes however, we just fall back to reading into a Vec then turning
+ // that into an Lrc, losing our nice peak memory behavior. This fallback code path should
+ // be rarely exercised.
+
+ let mut probe = [0u8; 32];
+ let n = loop {
+ match file.read(&mut probe) {
+ Ok(0) => return Ok(bytes),
+ Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ Ok(n) => break n,
+ }
+ };
+ let mut bytes: Vec<u8> = bytes.iter().copied().chain(probe[..n].iter().copied()).collect();
+ file.read_to_end(&mut bytes)?;
+ Ok(bytes.into())
}
}
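
The new `read_binary_file` sizes its buffer from `fs::metadata` and then verifies the answer, because procfs/sysfs files routinely lie about their length (the new test in `source_map/tests.rs` further down exercises exactly that). A simplified sketch of the same trust-then-verify shape using a plain `Vec<u8>`, without the single-allocation `Lrc<[u8]>` optimization:

    use std::fs;
    use std::io::{self, Read};
    use std::path::Path;

    fn read_binary_file(path: &Path) -> io::Result<Vec<u8>> {
        let mut file = fs::File::open(path)?;
        let len = file.metadata()?.len() as usize;

        // Read exactly as many bytes as the metadata claims exist.
        let mut bytes = vec![0u8; len];
        match file.read_exact(&mut bytes) {
            Ok(()) => {}
            // The file was shorter than metadata claimed: start over the simple way.
            Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return fs::read(path),
            Err(e) => return Err(e),
        }

        // Probe for bytes past the reported length; procfs files often report 0.
        let mut probe = [0u8; 32];
        let n = loop {
            match file.read(&mut probe) {
                Ok(0) => return Ok(bytes), // really at EOF, metadata was honest
                Ok(n) => break n,
                Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
                Err(e) => return Err(e),
            }
        };
        bytes.extend_from_slice(&probe[..n]);
        file.read_to_end(&mut bytes)?;
        Ok(bytes)
    }

    fn main() -> io::Result<()> {
        // Linux-only sanity check: stat claims /proc/self/cmdline is empty, but it is not.
        let bytes = read_binary_file(Path::new("/proc/self/cmdline"))?;
        assert_eq!(bytes, fs::read("/proc/self/cmdline")?);
        Ok(())
    }
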
@@ -174,9 +213,6 @@ pub(super) struct SourceMapFiles {
}
pub struct SourceMap {
- /// The address space below this value is currently used by the files in the source map.
- used_address_space: AtomicU32,
-
files: RwLock<SourceMapFiles>,
file_loader: IntoDynSyncSend<Box<dyn FileLoader + Sync + Send>>,
// This is used to apply the file path remapping as specified via
@@ -202,7 +238,6 @@ impl SourceMap {
hash_kind: SourceFileHashAlgorithm,
) -> SourceMap {
SourceMap {
- used_address_space: AtomicU32::new(0),
files: Default::default(),
file_loader: IntoDynSyncSend(file_loader),
path_mapping,
@@ -228,7 +263,7 @@ impl SourceMap {
///
/// Unlike `load_file`, guarantees that no normalization like BOM-removal
/// takes place.
- pub fn load_binary_file(&self, path: &Path) -> io::Result<Vec<u8>> {
+ pub fn load_binary_file(&self, path: &Path) -> io::Result<Lrc<[u8]>> {
let bytes = self.file_loader.read_binary_file(path)?;
// We need to add the file to the `SourceMap`, so that it is present
@@ -254,26 +289,26 @@ impl SourceMap {
self.files.borrow().stable_id_to_source_file.get(&stable_id).cloned()
}
- fn allocate_address_space(&self, size: usize) -> Result<usize, OffsetOverflowError> {
- let size = u32::try_from(size).map_err(|_| OffsetOverflowError)?;
-
- loop {
- let current = self.used_address_space.load(Ordering::Relaxed);
- let next = current
- .checked_add(size)
- // Add one so there is some space between files. This lets us distinguish
- // positions in the `SourceMap`, even in the presence of zero-length files.
- .and_then(|next| next.checked_add(1))
- .ok_or(OffsetOverflowError)?;
-
- if self
- .used_address_space
- .compare_exchange(current, next, Ordering::Relaxed, Ordering::Relaxed)
- .is_ok()
- {
- return Ok(usize::try_from(current).unwrap());
- }
- }
+ fn register_source_file(
+ &self,
+ file_id: StableSourceFileId,
+ mut file: SourceFile,
+ ) -> Result<Lrc<SourceFile>, OffsetOverflowError> {
+ let mut files = self.files.borrow_mut();
+
+ file.start_pos = BytePos(if let Some(last_file) = files.source_files.last() {
+ // Add one so there is some space between files. This lets us distinguish
+ // positions in the `SourceMap`, even in the presence of zero-length files.
+ last_file.end_position().0.checked_add(1).ok_or(OffsetOverflowError)?
+ } else {
+ 0
+ });
+
+ let file = Lrc::new(file);
+ files.source_files.push(file.clone());
+ files.stable_id_to_source_file.insert(file_id, file.clone());
+
+ Ok(file)
}
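
Offset allocation moves from the old atomic counter into `register_source_file`: each new file starts one byte past the previous file's `end_position()`, keeping the one-byte gap that makes zero-length files addressable. A toy model of the resulting layout with invented lengths:

    /// Toy model: (start_pos, len) pairs placed the way register_source_file now
    /// does it, with a one-byte gap after every file.
    fn register(files: &mut Vec<(u32, u32)>, len: u32) -> u32 {
        let start = match files.last() {
            Some(&(prev_start, prev_len)) => {
                (prev_start + prev_len).checked_add(1).expect("offset overflow")
            }
            None => 0,
        };
        files.push((start, len));
        start
    }

    fn main() {
        let mut files = Vec::new();
        assert_eq!(register(&mut files, 30), 0);  // bytes 0..30, end_position() = 30
        assert_eq!(register(&mut files, 0), 31);  // a zero-length file still gets its own position
        assert_eq!(register(&mut files, 10), 32);
    }
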
/// Creates a new `SourceFile`.
@@ -297,32 +332,18 @@ impl SourceMap {
let (filename, _) = self.path_mapping.map_filename_prefix(&filename);
let file_id = StableSourceFileId::new_from_name(&filename, LOCAL_CRATE);
-
- let lrc_sf = match self.source_file_by_stable_id(file_id) {
- Some(lrc_sf) => lrc_sf,
+ match self.source_file_by_stable_id(file_id) {
+ Some(lrc_sf) => Ok(lrc_sf),
None => {
- let start_pos = self.allocate_address_space(src.len())?;
-
- let source_file = Lrc::new(SourceFile::new(
- filename,
- src,
- Pos::from_usize(start_pos),
- self.hash_kind,
- ));
+ let source_file = SourceFile::new(filename, src, self.hash_kind)?;
// Let's make sure the file_id we generated above actually matches
// the ID we generate for the SourceFile we just created.
debug_assert_eq!(StableSourceFileId::new(&source_file), file_id);
- let mut files = self.files.borrow_mut();
-
- files.source_files.push(source_file.clone());
- files.stable_id_to_source_file.insert(file_id, source_file.clone());
-
- source_file
+ self.register_source_file(file_id, source_file)
}
- };
- Ok(lrc_sf)
+ }
}
/// Allocates a new `SourceFile` representing a source file from an external
@@ -334,78 +355,37 @@ impl SourceMap {
filename: FileName,
src_hash: SourceFileHash,
name_hash: Hash128,
- source_len: usize,
+ source_len: u32,
cnum: CrateNum,
- file_local_lines: Lock<SourceFileLines>,
- mut file_local_multibyte_chars: Vec<MultiByteChar>,
- mut file_local_non_narrow_chars: Vec<NonNarrowChar>,
- mut file_local_normalized_pos: Vec<NormalizedPos>,
- original_start_pos: BytePos,
+ file_local_lines: FreezeLock<SourceFileLines>,
+ multibyte_chars: Vec<MultiByteChar>,
+ non_narrow_chars: Vec<NonNarrowChar>,
+ normalized_pos: Vec<NormalizedPos>,
metadata_index: u32,
) -> Lrc<SourceFile> {
- let start_pos = self
- .allocate_address_space(source_len)
- .expect("not enough address space for imported source file");
-
- let end_pos = Pos::from_usize(start_pos + source_len);
- let start_pos = Pos::from_usize(start_pos);
-
- // Translate these positions into the new global frame of reference,
- // now that the offset of the SourceFile is known.
- //
- // These are all unsigned values. `original_start_pos` may be larger or
- // smaller than `start_pos`, but `pos` is always larger than both.
- // Therefore, `(pos - original_start_pos) + start_pos` won't overflow
- // but `start_pos - original_start_pos` might. So we use the former
- // form rather than pre-computing the offset into a local variable. The
- // compiler backend can optimize away the repeated computations in a
- // way that won't trigger overflow checks.
- match &mut *file_local_lines.borrow_mut() {
- SourceFileLines::Lines(lines) => {
- for pos in lines {
- *pos = (*pos - original_start_pos) + start_pos;
- }
- }
- SourceFileLines::Diffs(SourceFileDiffs { line_start, .. }) => {
- *line_start = (*line_start - original_start_pos) + start_pos;
- }
- }
- for mbc in &mut file_local_multibyte_chars {
- mbc.pos = (mbc.pos - original_start_pos) + start_pos;
- }
- for swc in &mut file_local_non_narrow_chars {
- *swc = (*swc - original_start_pos) + start_pos;
- }
- for nc in &mut file_local_normalized_pos {
- nc.pos = (nc.pos - original_start_pos) + start_pos;
- }
+ let source_len = RelativeBytePos::from_u32(source_len);
- let source_file = Lrc::new(SourceFile {
+ let source_file = SourceFile {
name: filename,
src: None,
src_hash,
- external_src: Lock::new(ExternalSource::Foreign {
+ external_src: FreezeLock::new(ExternalSource::Foreign {
kind: ExternalSourceKind::AbsentOk,
metadata_index,
}),
- start_pos,
- end_pos,
+ start_pos: BytePos(0),
+ source_len,
lines: file_local_lines,
- multibyte_chars: file_local_multibyte_chars,
- non_narrow_chars: file_local_non_narrow_chars,
- normalized_pos: file_local_normalized_pos,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
name_hash,
cnum,
- });
-
- let mut files = self.files.borrow_mut();
-
- files.source_files.push(source_file.clone());
- files
- .stable_id_to_source_file
- .insert(StableSourceFileId::new(&source_file), source_file.clone());
+ };
- source_file
+ let file_id = StableSourceFileId::new(&source_file);
+ self.register_source_file(file_id, source_file)
+ .expect("not enough address space for imported source file")
}
/// If there is a doctest offset, applies it to the line.
@@ -439,6 +419,7 @@ impl SourceMap {
pub fn lookup_line(&self, pos: BytePos) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
let f = self.lookup_source_file(pos);
+ let pos = f.relative_position(pos);
match f.lookup_line(pos) {
Some(line) => Ok(SourceFileAndLine { sf: f, line }),
None => Err(f),
@@ -534,7 +515,9 @@ impl SourceMap {
return true;
}
let f = (*self.files.borrow().source_files)[lo].clone();
- f.lookup_line(sp.lo()) != f.lookup_line(sp.hi())
+ let lo = f.relative_position(sp.lo());
+ let hi = f.relative_position(sp.hi());
+ f.lookup_line(lo) != f.lookup_line(hi)
}
#[instrument(skip(self), level = "trace")]
@@ -610,11 +593,11 @@ impl SourceMap {
end: (local_end.sf.name.clone(), local_end.sf.start_pos),
})))
} else {
- self.ensure_source_file_source_present(local_begin.sf.clone());
+ self.ensure_source_file_source_present(&local_begin.sf);
let start_index = local_begin.pos.to_usize();
let end_index = local_end.pos.to_usize();
- let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
+ let source_len = local_begin.sf.source_len.to_usize();
if start_index > end_index || end_index > source_len {
return Err(SpanSnippetError::MalformedForSourcemap(MalformedSourceMapPositions {
@@ -627,7 +610,7 @@ impl SourceMap {
if let Some(ref src) = local_begin.sf.src {
extract_source(src, start_index, end_index)
- } else if let Some(src) = local_begin.sf.external_src.borrow().get_source() {
+ } else if let Some(src) = local_begin.sf.external_src.read().get_source() {
extract_source(src, start_index, end_index)
} else {
Err(SpanSnippetError::SourceNotAvailable { filename: local_begin.sf.name.clone() })
@@ -919,7 +902,7 @@ impl SourceMap {
let sp = sp.data();
let local_begin = self.lookup_byte_offset(sp.lo);
let start_index = local_begin.pos.to_usize();
- let src = local_begin.sf.external_src.borrow();
+ let src = local_begin.sf.external_src.read();
let snippet = if let Some(ref src) = local_begin.sf.src {
Some(&src[start_index..])
@@ -1021,7 +1004,7 @@ impl SourceMap {
return 1;
}
- let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
+ let source_len = local_begin.sf.source_len.to_usize();
debug!("source_len=`{:?}`", source_len);
// Ensure indexes are also not malformed.
if start_index > end_index || end_index > source_len - 1 {
@@ -1029,7 +1012,7 @@ impl SourceMap {
return 1;
}
- let src = local_begin.sf.external_src.borrow();
+ let src = local_begin.sf.external_src.read();
let snippet = if let Some(src) = &local_begin.sf.src {
src
@@ -1076,7 +1059,7 @@ impl SourceMap {
self.files().iter().fold(0, |a, f| a + f.count_lines())
}
- pub fn ensure_source_file_source_present(&self, source_file: Lrc<SourceFile>) -> bool {
+ pub fn ensure_source_file_source_present(&self, source_file: &SourceFile) -> bool {
source_file.add_external_src(|| {
let FileName::Real(ref name) = source_file.name else {
return None;
diff --git a/compiler/rustc_span/src/source_map/tests.rs b/compiler/rustc_span/src/source_map/tests.rs
index 686b3b00d..a12f50c87 100644
--- a/compiler/rustc_span/src/source_map/tests.rs
+++ b/compiler/rustc_span/src/source_map/tests.rs
@@ -1,6 +1,6 @@
use super::*;
-use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::sync::{FreezeLock, Lrc};
fn init_source_map() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
@@ -50,6 +50,7 @@ impl SourceMap {
fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_source_file_idx(bpos);
let sf = &(*self.files.borrow().source_files)[idx];
+ let bpos = sf.relative_position(bpos);
sf.bytepos_to_file_charpos(bpos)
}
}
@@ -230,8 +231,7 @@ fn t10() {
let SourceFile {
name,
src_hash,
- start_pos,
- end_pos,
+ source_len,
lines,
multibyte_chars,
non_narrow_chars,
@@ -244,13 +244,12 @@ fn t10() {
name,
src_hash,
name_hash,
- (end_pos - start_pos).to_usize(),
+ source_len.to_u32(),
CrateNum::new(0),
- lines,
+ FreezeLock::new(lines.read().clone()),
multibyte_chars,
non_narrow_chars,
normalized_pos,
- start_pos,
0,
);
@@ -568,3 +567,30 @@ fn test_next_point() {
assert_eq!(span.hi().0, 6);
assert!(sm.span_to_snippet(span).is_err());
}
+
+#[cfg(target_os = "linux")]
+#[test]
+fn read_binary_file_handles_lying_stat() {
+ // read_binary_file tries to read the contents of a file into an Lrc<[u8]> while
+ // never having two copies of the data in memory at once. This is an optimization
+ // to support include_bytes! with large files. But since Rust allocators are
+ // sensitive to alignment, our implementation can't be bootstrapped off calling
+ // std::fs::read. So we test that we have the same behavior even on files where
+ // fs::metadata lies.
+
+ // stat always says that /proc/self/cmdline is length 0, but it isn't.
+ let cmdline = Path::new("/proc/self/cmdline");
+ let len = std::fs::metadata(cmdline).unwrap().len() as usize;
+ let real = std::fs::read(cmdline).unwrap();
+ assert!(len < real.len());
+ let bin = RealFileLoader.read_binary_file(cmdline).unwrap();
+ assert_eq!(&real[..], &bin[..]);
+
+ // stat always says that /sys/devices/system/cpu/kernel_max is the size of a block.
+ let kernel_max = Path::new("/sys/devices/system/cpu/kernel_max");
+ let len = std::fs::metadata(kernel_max).unwrap().len() as usize;
+ let real = std::fs::read(kernel_max).unwrap();
+ assert!(len > real.len());
+ let bin = RealFileLoader.read_binary_file(kernel_max).unwrap();
+ assert_eq!(&real[..], &bin[..]);
+}
diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs
index 1eea0f63c..93ab15460 100644
--- a/compiler/rustc_span/src/span_encoding.rs
+++ b/compiler/rustc_span/src/span_encoding.rs
@@ -1,9 +1,3 @@
-// Spans are encoded using 1-bit tag and 2 different encoding formats (one for each tag value).
-// One format is used for keeping span data inline,
-// another contains index into an out-of-line span interner.
-// The encoding format for inline spans were obtained by optimizing over crates in rustc/libstd.
-// See https://internals.rust-lang.org/t/rfc-compiler-refactoring-spans/1357/28
-
use crate::def_id::{DefIndex, LocalDefId};
use crate::hygiene::SyntaxContext;
use crate::SPAN_TRACK;
@@ -13,59 +7,69 @@ use rustc_data_structures::fx::FxIndexSet;
/// A compressed span.
///
-/// Whereas [`SpanData`] is 16 bytes, which is a bit too big to stick everywhere, `Span`
-/// is a form that only takes up 8 bytes, with less space for the length, parent and
-/// context. The vast majority (99.9%+) of `SpanData` instances will fit within
-/// those 8 bytes; any `SpanData` whose fields don't fit into a `Span` are
+/// [`SpanData`] is 16 bytes, which is too big to stick everywhere. `Span` only
+/// takes up 8 bytes, with less space for the length, parent and context. The
+/// vast majority (99.9%+) of `SpanData` instances can be made to fit within
+/// those 8 bytes. Any `SpanData` whose fields don't fit into a `Span` are
/// stored in a separate interner table, and the `Span` will index into that
/// table. Interning is rare enough that the cost is low, but common enough
/// that the code is exercised regularly.
///
/// An earlier version of this code used only 4 bytes for `Span`, but that was
/// slower because only 80--90% of spans could be stored inline (even less in
-/// very large crates) and so the interner was used a lot more.
+/// very large crates) and so the interner was used a lot more. That version of
+/// the code also predated the storage of parents.
+///
+/// There are four different span forms.
///
-/// Inline (compressed) format with no parent:
-/// - `span.base_or_index == span_data.lo`
-/// - `span.len_or_tag == len == span_data.hi - span_data.lo` (must be `<= MAX_LEN`)
-/// - `span.ctxt_or_tag == span_data.ctxt` (must be `<= MAX_CTXT`)
+/// Inline-context format (requires non-huge length, non-huge context, and no parent):
+/// - `span.lo_or_index == span_data.lo`
+/// - `span.len_with_tag_or_marker == len == span_data.hi - span_data.lo` (must be `<= MAX_LEN`)
+/// - `span.ctxt_or_parent_or_marker == span_data.ctxt` (must be `<= MAX_CTXT`)
///
-/// Interned format with inline `SyntaxContext`:
-/// - `span.base_or_index == index` (indexes into the interner table)
-/// - `span.len_or_tag == LEN_TAG` (high bit set, all other bits are zero)
-/// - `span.ctxt_or_tag == span_data.ctxt` (must be `<= MAX_CTXT`)
+/// Inline-parent format (requires non-huge length, root context, and non-huge parent):
+/// - `span.lo_or_index == span_data.lo`
+/// - `span.len_with_tag_or_marker & !PARENT_TAG == len == span_data.hi - span_data.lo`
+/// (must be `<= MAX_LEN`)
+/// - `span.len_with_tag_or_marker` has top bit (`PARENT_TAG`) set
+/// - `span.ctxt_or_parent_or_marker == span_data.parent` (must be `<= MAX_CTXT`)
///
-/// Inline (compressed) format with root context:
-/// - `span.base_or_index == span_data.lo`
-/// - `span.len_or_tag == len == span_data.hi - span_data.lo` (must be `<= MAX_LEN`)
-/// - `span.len_or_tag` has top bit (`PARENT_MASK`) set
-/// - `span.ctxt == span_data.parent` (must be `<= MAX_CTXT`)
+/// Partially-interned format (requires non-huge context):
+/// - `span.lo_or_index == index` (indexes into the interner table)
+/// - `span.len_with_tag_or_marker == BASE_LEN_INTERNED_MARKER`
+/// - `span.ctxt_or_parent_or_marker == span_data.ctxt` (must be `<= MAX_CTXT`)
///
-/// Interned format:
-/// - `span.base_or_index == index` (indexes into the interner table)
-/// - `span.len_or_tag == LEN_TAG` (high bit set, all other bits are zero)
-/// - `span.ctxt_or_tag == CTXT_TAG`
+/// Fully-interned format (all cases not covered above):
+/// - `span.lo_or_index == index` (indexes into the interner table)
+/// - `span.len_with_tag_or_marker == BASE_LEN_INTERNED_MARKER`
+/// - `span.ctxt_or_parent_or_marker == CTXT_INTERNED_MARKER`
///
-/// The inline form uses 0 for the tag value (rather than 1) so that we don't
-/// need to mask out the tag bit when getting the length, and so that the
-/// dummy span can be all zeroes.
+/// The partially-interned form requires looking in the interning table for
+/// lo and length, but the context is stored inline as well as interned.
+/// This is useful because context lookups are often done in isolation, and
+/// inline lookups are quicker.
///
/// Notes about the choice of field sizes:
-/// - `base` is 32 bits in both `Span` and `SpanData`, which means that `base`
-/// values never cause interning. The number of bits needed for `base`
+/// - `lo` is 32 bits in both `Span` and `SpanData`, which means that `lo`
+/// values never cause interning. The number of bits needed for `lo`
/// depends on the crate size. 32 bits allows up to 4 GiB of code in a crate.
-/// - `len` is 15 bits in `Span` (a u16, minus 1 bit for the tag) and 32 bits
-/// in `SpanData`, which means that large `len` values will cause interning.
-/// The number of bits needed for `len` does not depend on the crate size.
-/// The most common numbers of bits for `len` are from 0 to 7, with a peak usually
-/// at 3 or 4, and then it drops off quickly from 8 onwards. 15 bits is enough
-/// for 99.99%+ of cases, but larger values (sometimes 20+ bits) might occur
-/// dozens of times in a typical crate.
-/// - `ctxt_or_tag` is 16 bits in `Span` and 32 bits in `SpanData`, which means that
-/// large `ctxt` values will cause interning. The number of bits needed for
-/// `ctxt` values depend partly on the crate size and partly on the form of
-/// the code. No crates in `rustc-perf` need more than 15 bits for `ctxt_or_tag`,
-/// but larger crates might need more than 16 bits.
+/// Having no compression on this field means there is no performance cliff
+/// if a crate exceeds a particular size.
+/// - `len` is ~15 bits in `Span` (a u16, minus 1 bit for PARENT_TAG) and 32
+/// bits in `SpanData`, which means that large `len` values will cause
+/// interning. The number of bits needed for `len` does not depend on the
+/// crate size. The most common numbers of bits for `len` are from 0 to 7,
+/// with a peak usually at 3 or 4, and then it drops off quickly from 8
+/// onwards. 15 bits is enough for 99.99%+ of cases, but larger values
+/// (sometimes 20+ bits) might occur dozens of times in a typical crate.
+/// - `ctxt_or_parent_or_marker` is 16 bits in `Span` and two 32 bit fields in
+/// `SpanData`, which means interning will happen if `ctxt` is large, if
+/// `parent` is large, or if both values are non-zero. The number of bits
+/// needed for `ctxt` values depend partly on the crate size and partly on
+/// the form of the code. No crates in `rustc-perf` need more than 15 bits
+/// for `ctxt_or_parent_or_marker`, but larger crates might need more than 16
+/// bits. The number of bits needed for `parent` hasn't been measured,
+/// because `parent` isn't currently used by default.
///
/// In order to reliably use parented spans in incremental compilation,
/// we must record the dependency to the parent definition's span. This is performed
@@ -74,19 +78,22 @@ use rustc_data_structures::fx::FxIndexSet;
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
#[rustc_pass_by_value]
pub struct Span {
- base_or_index: u32,
- len_or_tag: u16,
- ctxt_or_tag: u16,
+ lo_or_index: u32,
+ len_with_tag_or_marker: u16,
+ ctxt_or_parent_or_marker: u16,
}
-const LEN_TAG: u16 = 0b1111_1111_1111_1111;
-const PARENT_MASK: u16 = 0b1000_0000_0000_0000;
-const MAX_LEN: u32 = 0b0111_1111_1111_1111;
-const CTXT_TAG: u32 = 0b1111_1111_1111_1111;
-const MAX_CTXT: u32 = CTXT_TAG - 1;
+// `MAX_LEN` is chosen so that `PARENT_TAG | MAX_LEN` is distinct from
+// `BASE_LEN_INTERNED_MARKER`. (If `MAX_LEN` was 1 higher, this wouldn't be true.)
+const MAX_LEN: u32 = 0b0111_1111_1111_1110;
+const MAX_CTXT: u32 = 0b0111_1111_1111_1110;
+const PARENT_TAG: u16 = 0b1000_0000_0000_0000;
+const BASE_LEN_INTERNED_MARKER: u16 = 0b1111_1111_1111_1111;
+const CTXT_INTERNED_MARKER: u16 = 0b1111_1111_1111_1111;
-/// Dummy span, both position and length are zero, syntax context is zero as well.
-pub const DUMMY_SP: Span = Span { base_or_index: 0, len_or_tag: 0, ctxt_or_tag: 0 };
+/// The dummy span has zero position, length, and context, and no parent.
+pub const DUMMY_SP: Span =
+ Span { lo_or_index: 0, len_with_tag_or_marker: 0, ctxt_or_parent_or_marker: 0 };
impl Span {
#[inline]
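
The rewritten header describes four encodings told apart by sentinel values, and the constants are picked so that `PARENT_TAG | MAX_LEN` can never collide with `BASE_LEN_INTERNED_MARKER`. A small check of that property, plus a classifier that mirrors the rules from the doc comment above (illustrative only, not the real constructor):

    const MAX_LEN: u32 = 0b0111_1111_1111_1110;
    const MAX_CTXT: u32 = 0b0111_1111_1111_1110;
    const PARENT_TAG: u16 = 0b1000_0000_0000_0000;
    const BASE_LEN_INTERNED_MARKER: u16 = 0b1111_1111_1111_1111;

    /// Which of the four forms a (len, ctxt, parent) combination selects, following
    /// the rules in the doc comment (ctxt 0 stands for the root context here).
    fn form(len: u32, ctxt: u32, parent: Option<u32>) -> &'static str {
        if len <= MAX_LEN && ctxt <= MAX_CTXT && parent.is_none() {
            "inline-context"
        } else if len <= MAX_LEN && ctxt == 0 && parent.is_some_and(|p| p <= MAX_CTXT) {
            "inline-parent"
        } else if ctxt <= MAX_CTXT {
            "partially-interned"
        } else {
            "fully-interned"
        }
    }

    fn main() {
        // The tagged inline-parent length can never be mistaken for the interned marker.
        assert_ne!(PARENT_TAG | MAX_LEN as u16, BASE_LEN_INTERNED_MARKER);

        assert_eq!(form(5, 3, None), "inline-context");            // the common case
        assert_eq!(form(5, 0, Some(42)), "inline-parent");         // parented span, root context
        assert_eq!(form(100_000, 3, None), "partially-interned");  // huge length, small ctxt
        assert_eq!(form(5, 100_000, Some(1)), "fully-interned");
    }
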
@@ -100,39 +107,43 @@ impl Span {
std::mem::swap(&mut lo, &mut hi);
}
- let (base, len, ctxt2) = (lo.0, hi.0 - lo.0, ctxt.as_u32());
-
- if len <= MAX_LEN && ctxt2 <= MAX_CTXT {
- let len_or_tag = len as u16;
- debug_assert_eq!(len_or_tag & PARENT_MASK, 0);
+ let (lo2, len, ctxt2) = (lo.0, hi.0 - lo.0, ctxt.as_u32());
- if let Some(parent) = parent {
- // Inline format with parent.
- let len_or_tag = len_or_tag | PARENT_MASK;
- let parent2 = parent.local_def_index.as_u32();
- if ctxt2 == SyntaxContext::root().as_u32()
- && parent2 <= MAX_CTXT
- && len_or_tag < LEN_TAG
- {
- debug_assert_ne!(len_or_tag, LEN_TAG);
- return Span { base_or_index: base, len_or_tag, ctxt_or_tag: parent2 as u16 };
- }
- } else {
- // Inline format with ctxt.
- debug_assert_ne!(len_or_tag, LEN_TAG);
+ if len <= MAX_LEN {
+ if ctxt2 <= MAX_CTXT && parent.is_none() {
+ // Inline-context format.
return Span {
- base_or_index: base,
- len_or_tag: len as u16,
- ctxt_or_tag: ctxt2 as u16,
+ lo_or_index: lo2,
+ len_with_tag_or_marker: len as u16,
+ ctxt_or_parent_or_marker: ctxt2 as u16,
+ };
+ } else if ctxt2 == SyntaxContext::root().as_u32()
+ && let Some(parent) = parent
+ && let parent2 = parent.local_def_index.as_u32()
+ && parent2 <= MAX_CTXT
+ {
+ // Inline-parent format.
+ return Span {
+ lo_or_index: lo2,
+ len_with_tag_or_marker: PARENT_TAG | len as u16,
+ ctxt_or_parent_or_marker: parent2 as u16
};
}
}
- // Interned format.
+ // Partially-interned or fully-interned format.
let index =
with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }));
- let ctxt_or_tag = if ctxt2 <= MAX_CTXT { ctxt2 } else { CTXT_TAG } as u16;
- Span { base_or_index: index, len_or_tag: LEN_TAG, ctxt_or_tag }
+ let ctxt_or_parent_or_marker = if ctxt2 <= MAX_CTXT {
+ ctxt2 as u16 // partially-interned
+ } else {
+ CTXT_INTERNED_MARKER // fully-interned
+ };
+ Span {
+ lo_or_index: index,
+ len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
+ ctxt_or_parent_or_marker,
+ }
}
#[inline]
@@ -148,56 +159,80 @@ impl Span {
/// This function must not be used outside the incremental engine.
#[inline]
pub fn data_untracked(self) -> SpanData {
- if self.len_or_tag != LEN_TAG {
- // Inline format.
- if self.len_or_tag & PARENT_MASK == 0 {
- debug_assert!(self.len_or_tag as u32 <= MAX_LEN);
+ if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
+ if self.len_with_tag_or_marker & PARENT_TAG == 0 {
+ // Inline-context format.
+ let len = self.len_with_tag_or_marker as u32;
+ debug_assert!(len <= MAX_LEN);
SpanData {
- lo: BytePos(self.base_or_index),
- hi: BytePos(self.base_or_index + self.len_or_tag as u32),
- ctxt: SyntaxContext::from_u32(self.ctxt_or_tag as u32),
+ lo: BytePos(self.lo_or_index),
+ hi: BytePos(self.lo_or_index + len),
+ ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
parent: None,
}
} else {
- let len = self.len_or_tag & !PARENT_MASK;
- debug_assert!(len as u32 <= MAX_LEN);
- let parent =
- LocalDefId { local_def_index: DefIndex::from_u32(self.ctxt_or_tag as u32) };
+ // Inline-parent format.
+ let len = (self.len_with_tag_or_marker & !PARENT_TAG) as u32;
+ debug_assert!(len <= MAX_LEN);
+ let parent = LocalDefId {
+ local_def_index: DefIndex::from_u32(self.ctxt_or_parent_or_marker as u32),
+ };
SpanData {
- lo: BytePos(self.base_or_index),
- hi: BytePos(self.base_or_index + len as u32),
+ lo: BytePos(self.lo_or_index),
+ hi: BytePos(self.lo_or_index + len),
ctxt: SyntaxContext::root(),
parent: Some(parent),
}
}
} else {
- // Interned format.
- let index = self.base_or_index;
+ // Fully-interned or partially-interned format. In either case,
+ // the interned value contains all the data, so we don't need to
+ // distinguish them.
+ let index = self.lo_or_index;
with_span_interner(|interner| interner.spans[index as usize])
}
}
+ /// Returns `true` if this is a dummy span with any hygienic context.
+ #[inline]
+ pub fn is_dummy(self) -> bool {
+ if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
+ // Inline-context or inline-parent format.
+ let lo = self.lo_or_index;
+ let len = (self.len_with_tag_or_marker & !PARENT_TAG) as u32;
+ debug_assert!(len <= MAX_LEN);
+ lo == 0 && len == 0
+ } else {
+ // Fully-interned or partially-interned format.
+ let index = self.lo_or_index;
+ let data = with_span_interner(|interner| interner.spans[index as usize]);
+ data.lo == BytePos(0) && data.hi == BytePos(0)
+ }
+ }
+
/// This function is used as a fast path when decoding the full `SpanData` is not necessary.
+ /// It's a cut-down version of `data_untracked`.
#[inline]
pub fn ctxt(self) -> SyntaxContext {
- let ctxt_or_tag = self.ctxt_or_tag as u32;
- // Check for interned format.
- if self.len_or_tag == LEN_TAG {
- if ctxt_or_tag == CTXT_TAG {
- // Fully interned format.
- let index = self.base_or_index;
- with_span_interner(|interner| interner.spans[index as usize].ctxt)
+ if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
+ if self.len_with_tag_or_marker & PARENT_TAG == 0 {
+ // Inline-context format.
+ SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)
} else {
- // Interned format with inline ctxt.
- SyntaxContext::from_u32(ctxt_or_tag)
+ // Inline-parent format. We know that the SyntaxContext is root.
+ SyntaxContext::root()
}
- } else if self.len_or_tag & PARENT_MASK == 0 {
- // Inline format with inline ctxt.
- SyntaxContext::from_u32(ctxt_or_tag)
} else {
- // Inline format with inline parent.
- // We know that the SyntaxContext is root.
- SyntaxContext::root()
+ if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
+ // Partially-interned format. This path avoids looking up the
+ // interned value, and is the whole point of the
+ // partially-interned format.
+ SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)
+ } else {
+ // Fully-interned format.
+ let index = self.lo_or_index;
+ with_span_interner(|interner| interner.spans[index as usize].ctxt)
+ }
}
}
}
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 28a2dfebc..4f4625662 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -232,11 +232,13 @@ symbols! {
NonZeroI32,
NonZeroI64,
NonZeroI8,
+ NonZeroIsize,
NonZeroU128,
NonZeroU16,
NonZeroU32,
NonZeroU64,
NonZeroU8,
+ NonZeroUsize,
None,
Ok,
Option,
@@ -278,6 +280,7 @@ symbols! {
RwLock,
RwLockReadGuard,
RwLockWriteGuard,
+ Saturating,
Send,
SeqCst,
SliceIndex,
@@ -305,6 +308,7 @@ symbols! {
Vec,
VecDeque,
Wrapper,
+ Wrapping,
Yield,
_DECLS,
_Self,
@@ -383,6 +387,7 @@ symbols! {
asm_sym,
asm_unwind,
assert,
+ assert_eq,
assert_eq_macro,
assert_inhabited,
assert_macro,
@@ -569,6 +574,8 @@ symbols! {
cosf32,
cosf64,
count,
+ coverage,
+ coverage_attribute,
cr,
crate_id,
crate_in_paths,
@@ -585,6 +592,7 @@ symbols! {
cttz,
cttz_nonzero,
custom_attribute,
+ custom_code_classes_in_docs,
custom_derive,
custom_inner_attributes,
custom_mir,
@@ -1064,6 +1072,7 @@ symbols! {
note,
object_safe_for_dispatch,
of,
+ off,
offset,
offset_of,
omit_gdb_pretty_printer_section,
@@ -1100,6 +1109,7 @@ symbols! {
panic_handler,
panic_impl,
panic_implementation,
+ panic_in_cleanup,
panic_info,
panic_location,
panic_misaligned_pointer_dereference,
@@ -1169,7 +1179,6 @@ symbols! {
ptr_cast_const,
ptr_cast_mut,
ptr_const_is_null,
- ptr_from_mut,
ptr_from_ref,
ptr_guaranteed_cmp,
ptr_is_null,
@@ -1179,6 +1188,9 @@ symbols! {
ptr_offset_from,
ptr_offset_from_unsigned,
ptr_unique,
+ ptr_write,
+ ptr_write_unaligned,
+ ptr_write_volatile,
pub_macro_rules,
pub_restricted,
public,
@@ -1273,6 +1285,7 @@ symbols! {
rust_eh_catch_typeinfo,
rust_eh_personality,
rustc,
+ rustc_abi,
rustc_allocator,
rustc_allocator_zeroed,
rustc_allow_const_fn_unstable,
@@ -1324,6 +1337,7 @@ symbols! {
rustc_main,
rustc_mir,
rustc_must_implement_one_of,
+ rustc_never_returns_null_ptr,
rustc_nonnull_optimization_guaranteed,
rustc_nounwind,
rustc_object_lifetime_default,
@@ -1357,6 +1371,7 @@ symbols! {
rustc_trivial_field_reads,
rustc_unsafe_specialization_marker,
rustc_variance,
+ rustc_variance_of_opaques,
rustdoc,
rustdoc_internals,
rustdoc_missing_doc_code_examples,
@@ -1370,6 +1385,7 @@ symbols! {
sanitizer_cfi_normalize_integers,
sanitizer_runtime,
saturating_add,
+ saturating_div,
saturating_sub,
self_in_typedefs,
self_struct_ctor,
@@ -1449,6 +1465,7 @@ symbols! {
simd_shl,
simd_shr,
simd_shuffle,
+ simd_shuffle_generic,
simd_sub,
simd_trunc,
simd_xor,
@@ -1615,6 +1632,7 @@ symbols! {
unix_sigpipe,
unlikely,
unmarked_api,
+ unnamed_fields,
unpin,
unreachable,
unreachable_2015,
@@ -1627,6 +1645,7 @@ symbols! {
unsafe_block_in_unsafe_fn,
unsafe_cell,
unsafe_cell_from_mut,
+ unsafe_cell_raw_get,
unsafe_no_drop_flag,
unsafe_pin_internals,
unsize,
@@ -1687,7 +1706,10 @@ symbols! {
windows_subsystem,
with_negative_coherence,
wrapping_add,
+ wrapping_div,
wrapping_mul,
+ wrapping_rem,
+ wrapping_rem_euclid,
wrapping_sub,
wreg,
write_bytes,
diff --git a/compiler/rustc_span/src/tests.rs b/compiler/rustc_span/src/tests.rs
index a242ad6d1..cb88fa890 100644
--- a/compiler/rustc_span/src/tests.rs
+++ b/compiler/rustc_span/src/tests.rs
@@ -3,24 +3,21 @@ use super::*;
#[test]
fn test_lookup_line() {
let source = "abcdefghijklm\nabcdefghij\n...".to_owned();
- let sf = SourceFile::new(
- FileName::Anon(Hash64::ZERO),
- source,
- BytePos(3),
- SourceFileHashAlgorithm::Sha256,
- );
- sf.lines(|lines| assert_eq!(lines, &[BytePos(3), BytePos(17), BytePos(28)]));
+ let mut sf =
+ SourceFile::new(FileName::Anon(Hash64::ZERO), source, SourceFileHashAlgorithm::Sha256)
+ .unwrap();
+ sf.start_pos = BytePos(3);
+ assert_eq!(sf.lines(), &[RelativeBytePos(0), RelativeBytePos(14), RelativeBytePos(25)]);
- assert_eq!(sf.lookup_line(BytePos(0)), None);
- assert_eq!(sf.lookup_line(BytePos(3)), Some(0));
- assert_eq!(sf.lookup_line(BytePos(4)), Some(0));
+ assert_eq!(sf.lookup_line(RelativeBytePos(0)), Some(0));
+ assert_eq!(sf.lookup_line(RelativeBytePos(1)), Some(0));
- assert_eq!(sf.lookup_line(BytePos(16)), Some(0));
- assert_eq!(sf.lookup_line(BytePos(17)), Some(1));
- assert_eq!(sf.lookup_line(BytePos(18)), Some(1));
+ assert_eq!(sf.lookup_line(RelativeBytePos(13)), Some(0));
+ assert_eq!(sf.lookup_line(RelativeBytePos(14)), Some(1));
+ assert_eq!(sf.lookup_line(RelativeBytePos(15)), Some(1));
- assert_eq!(sf.lookup_line(BytePos(28)), Some(2));
- assert_eq!(sf.lookup_line(BytePos(29)), Some(2));
+ assert_eq!(sf.lookup_line(RelativeBytePos(25)), Some(2));
+ assert_eq!(sf.lookup_line(RelativeBytePos(26)), Some(2));
}
#[test]
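The updated test exercises line lookup against file-relative offsets instead of absolute `BytePos` values. As a rough, standalone illustration of that lookup (not the real `SourceFile` implementation), a sorted list of relative line-start positions can be searched with `partition_point`:
```rust
/// Given the byte offsets (relative to the file start) at which each line begins,
/// return the 0-based index of the line containing `pos`, or `None` if `pos` is
/// before the first line start. Mirrors the shape of the test above, not rustc's code.
fn lookup_line(line_starts: &[u32], pos: u32) -> Option<usize> {
    // Count how many line starts are <= pos; the containing line is the last of those.
    line_starts.partition_point(|&start| start <= pos).checked_sub(1)
}

fn main() {
    // "abcdefghijklm\nabcdefghij\n..." => lines start at relative offsets 0, 14 and 25.
    let line_starts = [0u32, 14, 25];
    assert_eq!(lookup_line(&line_starts, 0), Some(0));
    assert_eq!(lookup_line(&line_starts, 13), Some(0));
    assert_eq!(lookup_line(&line_starts, 14), Some(1));
    assert_eq!(lookup_line(&line_starts, 26), Some(2));
}
```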
diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs
index 3a3356808..2fc102bda 100644
--- a/compiler/rustc_symbol_mangling/src/legacy.rs
+++ b/compiler/rustc_symbol_mangling/src/legacy.rs
@@ -230,7 +230,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolPrinter<'tcx> {
self.write_str("[")?;
self = self.print_type(ty)?;
self.write_str("; ")?;
- if let Some(size) = size.try_to_bits(self.tcx().data_layout.pointer_size) {
+ if let Some(size) = size.try_to_target_usize(self.tcx()) {
write!(self, "{size}")?
} else if let ty::ConstKind::Param(param) = size.kind() {
self = param.print(self)?
diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs
index 74538e9f5..535a3ea2d 100644
--- a/compiler/rustc_symbol_mangling/src/lib.rs
+++ b/compiler/rustc_symbol_mangling/src/lib.rs
@@ -108,7 +108,6 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::query::Providers;
-use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, Instance, TyCtxt};
use rustc_session::config::SymbolManglingVersion;
@@ -144,7 +143,7 @@ fn symbol_name_provider<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty
// This closure determines the instantiating crate for instances that
// need an instantiating-crate-suffix for their symbol name, in order
// to differentiate between local copies.
- if is_generic(instance.args) {
+ if is_generic(instance, tcx) {
// For generics we might find re-usable upstream instances. If there
// is one, we rely on the symbol being instantiated locally.
instance.upstream_monomorphization(tcx).unwrap_or(LOCAL_CRATE)
@@ -246,7 +245,7 @@ fn compute_symbol_name<'tcx>(
// the ID of the instantiating crate. This avoids symbol conflicts
// in case the same instances is emitted in two crates of the same
// project.
- let avoid_cross_crate_conflicts = is_generic(args) || is_globally_shared_function;
+ let avoid_cross_crate_conflicts = is_generic(instance, tcx) || is_globally_shared_function;
let instantiating_crate = avoid_cross_crate_conflicts.then(compute_instantiating_crate);
@@ -278,6 +277,6 @@ fn compute_symbol_name<'tcx>(
symbol
}
-fn is_generic(args: GenericArgsRef<'_>) -> bool {
- args.non_erasable_generics().next().is_some()
+fn is_generic<'tcx>(instance: Instance<'tcx>, tcx: TyCtxt<'tcx>) -> bool {
+ instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some()
}
diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
index d345368d5..6ad3e7155 100644
--- a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
+++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
@@ -118,7 +118,7 @@ fn encode_const<'tcx>(
// bool value false is encoded as 0 and true as 1.
match c.ty().kind() {
ty::Int(ity) => {
- let bits = c.eval_bits(tcx, ty::ParamEnv::reveal_all(), c.ty());
+ let bits = c.eval_bits(tcx, ty::ParamEnv::reveal_all());
let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
if val < 0 {
s.push('n');
@@ -126,7 +126,7 @@ fn encode_const<'tcx>(
let _ = write!(s, "{val}");
}
ty::Uint(_) => {
- let val = c.eval_bits(tcx, ty::ParamEnv::reveal_all(), c.ty());
+ let val = c.eval_bits(tcx, ty::ParamEnv::reveal_all());
let _ = write!(s, "{val}");
}
ty::Bool => {
@@ -447,7 +447,7 @@ fn encode_ty<'tcx>(
typeid.push('b');
}
- ty::Int(..) | ty::Uint(..) | ty::Float(..) => {
+ ty::Int(..) | ty::Uint(..) => {
// u<length><type-name> as vendor extended type
let mut s = String::from(match ty.kind() {
ty::Int(IntTy::I8) => "u2i8",
@@ -462,14 +462,23 @@ fn encode_ty<'tcx>(
ty::Uint(UintTy::U64) => "u3u64",
ty::Uint(UintTy::U128) => "u4u128",
ty::Uint(UintTy::Usize) => "u5usize",
- ty::Float(FloatTy::F32) => "u3f32",
- ty::Float(FloatTy::F64) => "u3f64",
- _ => "",
+ _ => bug!("encode_ty: unexpected `{:?}`", ty.kind()),
});
compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
typeid.push_str(&s);
}
+ // Rust's f32 and f64 single (32-bit) and double (64-bit) precision floating-point types
+ // have IEEE-754 binary32 and binary64 floating-point layouts, respectively.
+ //
+ // (See https://rust-lang.github.io/unsafe-code-guidelines/layout/scalars.html#fixed-width-floating-point-types.)
+ ty::Float(float_ty) => {
+ typeid.push(match float_ty {
+ FloatTy::F32 => 'f',
+ FloatTy::F64 => 'd',
+ });
+ }
+
ty::Char => {
// u4char as vendor extended type
let mut s = String::from("u4char");
@@ -711,7 +720,6 @@ fn encode_ty<'tcx>(
| ty::Bound(..)
| ty::Error(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Infer(..)
| ty::Placeholder(..) => {
bug!("encode_ty: unexpected `{:?}`", ty.kind());
@@ -964,12 +972,7 @@ fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptio
);
}
- ty::Bound(..)
- | ty::Error(..)
- | ty::GeneratorWitnessMIR(..)
- | ty::Infer(..)
- | ty::Param(..)
- | ty::Placeholder(..) => {
+ ty::Bound(..) | ty::Error(..) | ty::Infer(..) | ty::Param(..) | ty::Placeholder(..) => {
bug!("transform_ty: unexpected `{:?}`", ty.kind());
}
}
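The hunk above moves `f32`/`f64` from vendor-extended encodings to the Itanium C++ ABI builtin codes `f` and `d`, while Rust integer types keep the `u<length><name>` vendor-extended form. A small, self-contained sketch of that mapping (toy type enum, not rustc's `Ty`) makes the two encoding families visible:
```rust
// Toy model of the CFI type-id encoding choice made above. Illustrative only.
enum ToyTy {
    Bool,
    I8,
    U64,
    Usize,
    F32,
    F64,
}

fn encode_ty(ty: &ToyTy, typeid: &mut String) {
    match ty {
        // Itanium builtin type codes.
        ToyTy::Bool => typeid.push('b'),
        ToyTy::F32 => typeid.push('f'),
        ToyTy::F64 => typeid.push('d'),
        // Vendor-extended types: "u" + length of the name + the name itself.
        ToyTy::I8 => typeid.push_str("u2i8"),
        ToyTy::U64 => typeid.push_str("u3u64"),
        ToyTy::Usize => typeid.push_str("u5usize"),
    }
}

fn main() {
    let mut typeid = String::new();
    for ty in [ToyTy::Bool, ToyTy::F32, ToyTy::I8, ToyTy::F64, ToyTy::U64, ToyTy::Usize] {
        encode_ty(&ty, &mut typeid);
    }
    assert_eq!(typeid, "bfu2i8du3u64u5usize");
    println!("{typeid}");
}
```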
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
index da19a3ba4..82b1a772e 100644
--- a/compiler/rustc_symbol_mangling/src/v0.rs
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -329,7 +329,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
// Late-bound lifetimes use indices starting at 1,
// see `BinderLevel` for more details.
- ty::ReLateBound(debruijn, ty::BoundRegion { var, kind: ty::BrAnon(_) }) => {
+ ty::ReLateBound(debruijn, ty::BoundRegion { var, kind: ty::BrAnon }) => {
let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
let depth = binder.lifetime_depths.start + var.as_u32();
@@ -484,8 +484,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
ty::Alias(ty::Inherent, _) => bug!("symbol_names: unexpected inherent projection"),
ty::Alias(ty::Weak, _) => bug!("symbol_names: unexpected weak projection"),
- ty::GeneratorWitness(_) => bug!("symbol_names: unexpected `GeneratorWitness`"),
- ty::GeneratorWitnessMIR(..) => bug!("symbol_names: unexpected `GeneratorWitnessMIR`"),
+ ty::GeneratorWitness(..) => bug!("symbol_names: unexpected `GeneratorWitness`"),
}
// Only cache types that do not refer to an enclosing
@@ -562,7 +561,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
fn print_const(mut self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
// We only mangle a typed value if the const can be evaluated.
- let ct = ct.eval(self.tcx, ty::ParamEnv::reveal_all());
+ let ct = ct.normalize(self.tcx, ty::ParamEnv::reveal_all());
match ct.kind() {
ty::ConstKind::Value(_) => {}
@@ -594,7 +593,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Char => {
self = ty.print(self)?;
- let mut bits = ct.eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ty);
+ let mut bits = ct.eval_bits(self.tcx, ty::ParamEnv::reveal_all());
// Negative integer values are mangled using `n` as a "sign prefix".
if let ty::Int(ity) = ty.kind() {
diff --git a/compiler/rustc_target/src/abi/call/loongarch.rs b/compiler/rustc_target/src/abi/call/loongarch.rs
index 247256f07..e649d58bb 100644
--- a/compiler/rustc_target/src/abi/call/loongarch.rs
+++ b/compiler/rustc_target/src/abi/call/loongarch.rs
@@ -83,6 +83,17 @@ where
}
FieldsShape::Union(_) => {
if !arg_layout.is_zst() {
+ if arg_layout.is_transparent() {
+ let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
+ return should_use_fp_conv_helper(
+ cx,
+ &non_1zst_elem,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ );
+ }
return Err(CannotUseFpConv);
}
}
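This hunk (and the identical one in riscv.rs further down) forwards a `#[repr(transparent)]` union to its single non-1-ZST field before deciding whether the value can travel in floating-point registers. The following toy sketch, with simplified layouts rather than the real `TyAndLayout` API, shows the forwarding idea: a transparent wrapper classifies exactly like the one field that actually occupies space.
```rust
// Simplified stand-in for FP-register classification; illustrative only.
#[derive(Clone)]
enum ToyLayout {
    Float { bits: u64 },
    Int { bits: u64 },
    // A #[repr(transparent)] wrapper: at most one field is a non-1-ZST.
    Transparent { fields: Vec<ToyLayout> },
}

impl ToyLayout {
    fn is_1zst(&self) -> bool {
        // Simplification: treat "zero bits" as a 1-ZST (the real check is size 0, align 1).
        match self {
            ToyLayout::Float { bits } | ToyLayout::Int { bits } => *bits == 0,
            ToyLayout::Transparent { fields } => fields.iter().all(|f| f.is_1zst()),
        }
    }

    /// Returns `true` if the value fits a floating-point register of `flen` bits.
    fn use_fp_reg(&self, flen: u64) -> bool {
        match self {
            ToyLayout::Float { bits } => *bits <= flen,
            ToyLayout::Int { .. } => false,
            ToyLayout::Transparent { fields } => {
                // Forward to the single field that is not a 1-ZST, if any.
                match fields.iter().find(|f| !f.is_1zst()) {
                    Some(inner) => inner.use_fp_reg(flen),
                    None => false,
                }
            }
        }
    }
}

fn main() {
    let wrapper = ToyLayout::Transparent {
        fields: vec![ToyLayout::Int { bits: 0 }, ToyLayout::Float { bits: 64 }],
    };
    assert!(wrapper.use_fp_reg(64));
    assert!(!wrapper.use_fp_reg(32));
}
```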
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 8fab13d5d..5efd171b9 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -2,6 +2,7 @@ use crate::abi::{self, Abi, Align, FieldsShape, Size};
use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
use crate::spec::{self, HasTargetSpec};
use rustc_span::Symbol;
+use std::fmt;
use std::str::FromStr;
mod aarch64;
@@ -36,23 +37,52 @@ pub enum PassMode {
Ignore,
/// Pass the argument directly.
///
- /// The argument has a layout abi of `Scalar`, `Vector` or in rare cases `Aggregate`.
+ /// The argument has a layout abi of `Scalar` or `Vector`.
+ /// Unfortunately due to past mistakes, in rare cases on wasm, it can also be `Aggregate`.
+ /// This is bad since it leaks LLVM implementation details into the ABI.
+ /// (Also see <https://github.com/rust-lang/rust/issues/115666>.)
Direct(ArgAttributes),
/// Pass a pair's elements directly in two arguments.
///
/// The argument has a layout abi of `ScalarPair`.
Pair(ArgAttributes, ArgAttributes),
- /// Pass the argument after casting it, to either a single uniform or a
- /// pair of registers. The bool indicates if a `Reg::i32()` dummy argument
- /// is emitted before the real argument.
- Cast(Box<CastTarget>, bool),
+ /// Pass the argument after casting it. See the `CastTarget` docs for details. The bool
+ /// indicates if a `Reg::i32()` dummy argument is emitted before the real argument.
+ Cast { pad_i32: bool, cast: Box<CastTarget> },
/// Pass the argument indirectly via a hidden pointer.
- /// The `extra_attrs` value, if any, is for the extra data (vtable or length)
- /// which indicates that it refers to an unsized rvalue.
- /// `on_stack` defines that the value should be passed at a fixed
- /// stack offset in accordance to the ABI rather than passed using a
- /// pointer. This corresponds to the `byval` LLVM argument attribute.
- Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
+ /// The `meta_attrs` value, if any, is for the metadata (vtable or length) of an unsized
+ /// argument. (This is the only mode that supports unsized arguments.)
+ /// `on_stack` defines that the value should be passed at a fixed stack offset in accordance to
+ /// the ABI rather than passed using a pointer. This corresponds to the `byval` LLVM argument
+ /// attribute (using the Rust type of this argument). `on_stack` cannot be true for unsized
+ /// arguments, i.e., when `meta_attrs` is `Some`.
+ Indirect { attrs: ArgAttributes, meta_attrs: Option<ArgAttributes>, on_stack: bool },
+}
+
+impl PassMode {
+ /// Checks if these two `PassMode` are equal enough to be considered "the same for all
+ /// function call ABIs". However, the `Layout` can also impact ABI decisions,
+ /// so that needs to be compared as well!
+ pub fn eq_abi(&self, other: &Self) -> bool {
+ match (self, other) {
+ (PassMode::Ignore, PassMode::Ignore) => true,
+ (PassMode::Direct(a1), PassMode::Direct(a2)) => a1.eq_abi(a2),
+ (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
+ (
+ PassMode::Cast { cast: c1, pad_i32: pad1 },
+ PassMode::Cast { cast: c2, pad_i32: pad2 },
+ ) => c1.eq_abi(c2) && pad1 == pad2,
+ (
+ PassMode::Indirect { attrs: a1, meta_attrs: None, on_stack: s1 },
+ PassMode::Indirect { attrs: a2, meta_attrs: None, on_stack: s2 },
+ ) => a1.eq_abi(a2) && s1 == s2,
+ (
+ PassMode::Indirect { attrs: a1, meta_attrs: Some(e1), on_stack: s1 },
+ PassMode::Indirect { attrs: a2, meta_attrs: Some(e2), on_stack: s2 },
+ ) => a1.eq_abi(a2) && e1.eq_abi(e2) && s1 == s2,
+ _ => false,
+ }
+ }
}
// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
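The new `eq_abi` methods answer one narrow question: would two signatures be passed identically under every calling convention? The sketch below replays that comparison on cut-down, invented types (`ToyArgAttributes`, `ToyPassMode`), keeping only the pieces the call ABI cares about; it is an illustration of the pattern, not the rustc implementation.
```rust
#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
enum ArgExtension {
    None,
    Zext,
    Sext,
}

#[derive(Clone, Copy)]
struct ToyArgAttributes {
    in_reg: bool,          // the only "regular" attribute that changes the call ABI
    arg_ext: ArgExtension, // sign/zero-extension promises matter too
    // noalias, nonnull, dereferenceable, ... would live here but are ignored by eq_abi
}

impl ToyArgAttributes {
    fn eq_abi(&self, other: &Self) -> bool {
        self.in_reg == other.in_reg && self.arg_ext == other.arg_ext
    }
}

#[allow(dead_code)]
enum ToyPassMode {
    Ignore,
    Direct(ToyArgAttributes),
    Pair(ToyArgAttributes, ToyArgAttributes),
    Indirect { attrs: ToyArgAttributes, on_stack: bool },
}

impl ToyPassMode {
    fn eq_abi(&self, other: &Self) -> bool {
        use ToyPassMode::*;
        match (self, other) {
            (Ignore, Ignore) => true,
            (Direct(a), Direct(b)) => a.eq_abi(b),
            (Pair(a1, b1), Pair(a2, b2)) => a1.eq_abi(a2) && b1.eq_abi(b2),
            (Indirect { attrs: a1, on_stack: s1 }, Indirect { attrs: a2, on_stack: s2 }) => {
                a1.eq_abi(a2) && s1 == s2
            }
            _ => false,
        }
    }
}

fn main() {
    let zext = ToyArgAttributes { in_reg: false, arg_ext: ArgExtension::Zext };
    let sext = ToyArgAttributes { in_reg: false, arg_ext: ArgExtension::Sext };
    // Same mode, same extension: passed the same way everywhere.
    assert!(ToyPassMode::Direct(zext).eq_abi(&ToyPassMode::Direct(zext)));
    // Different extension promises: callees could assume different high bits.
    assert!(!ToyPassMode::Direct(zext).eq_abi(&ToyPassMode::Direct(sext)));
}
```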
@@ -127,6 +157,24 @@ impl ArgAttributes {
pub fn contains(&self, attr: ArgAttribute) -> bool {
self.regular.contains(attr)
}
+
+ /// Checks if these two `ArgAttributes` are equal enough to be considered "the same for all
+ /// function call ABIs".
+ pub fn eq_abi(&self, other: &Self) -> bool {
+ // There's only one regular attribute that matters for the call ABI: InReg.
+ // Everything else is things like noalias, dereferenceable, nonnull, ...
+ // (This also applies to pointee_size, pointee_align.)
+ if self.regular.contains(ArgAttribute::InReg) != other.regular.contains(ArgAttribute::InReg)
+ {
+ return false;
+ }
+ // We also compare the sign extension mode -- this could let the callee make assumptions
+ // about bits that conceptually were not even passed.
+ if self.arg_ext != other.arg_ext {
+ return false;
+ }
+ return true;
+ }
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
@@ -211,6 +259,13 @@ impl Uniform {
}
}
+/// Describes the type used for `PassMode::Cast`.
+///
+/// Passing arguments in this mode works as follows: the registers in the `prefix` (the ones that
+/// are `Some`) get laid out one after the other (using `repr(C)` layout rules). Then the
+/// `rest.unit` register type gets repeated often enough to cover `rest.size`. This describes the
+/// actual type used for the call; the Rust type of the argument is then transmuted to this ABI type
+/// (and all data in the padding between the registers is dropped).
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
pub prefix: [Option<Reg>; 8],
@@ -272,6 +327,14 @@ impl CastTarget {
acc.max(align)
})
}
+
+ /// Checks if these two `CastTarget` are equal enough to be considered "the same for all
+ /// function call ABIs".
+ pub fn eq_abi(&self, other: &Self) -> bool {
+ let CastTarget { prefix: prefix_l, rest: rest_l, attrs: attrs_l } = self;
+ let CastTarget { prefix: prefix_r, rest: rest_r, attrs: attrs_r } = other;
+ prefix_l == prefix_r && rest_l == rest_r && attrs_l.eq_abi(attrs_r)
+ }
}
/// Return value from the `homogeneous_aggregate` test function.
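The new `CastTarget` doc comment describes how the synthesized ABI type is assembled: the `Some` entries of `prefix` are laid out back to back under `repr(C)`-like rules, then `rest.unit` is repeated until it covers the rest of the size. As a hedged, standalone sketch of that rule (toy `Reg`/`Uniform` types; the real `CastTarget::size` is not shown in this diff and may differ in details such as trailing padding):
```rust
// Toy register descriptions; sizes and alignments are in bytes.
#[derive(Clone, Copy)]
struct Reg {
    size: u64,
    align: u64,
}

#[derive(Clone, Copy)]
struct Uniform {
    unit: Reg,
    total: u64, // total byte size the repeated unit has to cover
}

struct CastTarget {
    prefix: [Option<Reg>; 8],
    rest: Uniform,
}

fn align_to(offset: u64, align: u64) -> u64 {
    (offset + align - 1) / align * align
}

impl CastTarget {
    /// Size of the synthesized ABI type: prefix registers laid out one after the
    /// other (respecting each register's alignment), followed by enough copies of
    /// `rest.unit` to cover `rest.total`.
    fn size(&self) -> u64 {
        let mut offset = 0;
        for reg in self.prefix.iter().flatten() {
            offset = align_to(offset, reg.align) + reg.size;
        }
        let rest_units = (self.rest.total + self.rest.unit.size - 1) / self.rest.unit.size;
        align_to(offset, self.rest.unit.align) + rest_units * self.rest.unit.size
    }
}

fn main() {
    let i32_reg = Reg { size: 4, align: 4 };
    let i64_reg = Reg { size: 8, align: 8 };
    // One i32 prefix register, then i64 units covering 12 more bytes (=> two i64s).
    let cast = CastTarget {
        prefix: [Some(i32_reg), None, None, None, None, None, None, None],
        rest: Uniform { unit: i64_reg, total: 12 },
    };
    assert_eq!(cast.size(), 24); // 4 (i32) + pad to 8 + 16 (two i64 units)
}
```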
@@ -330,8 +393,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
/// only a single type (e.g., `(u32, u32)`). Such aggregates are often
/// special-cased in ABIs.
///
- /// Note: We generally ignore fields of zero-sized type when computing
- /// this value (see #56877).
+ /// Note: We generally ignore 1-ZST fields when computing this value (see #56877).
///
/// This is public so that it can be used in unit tests, but
/// should generally only be relevant to the ABI details of
@@ -389,12 +451,18 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
let mut total = start;
for i in 0..layout.fields.count() {
+ let field = layout.field(cx, i);
+ if field.is_1zst() {
+ // No data here and no impact on layout, can be ignored.
+ // (We might be able to also ignore all aligned ZST but that's less clear.)
+ continue;
+ }
+
if !is_union && total != layout.fields.offset(i) {
+ // This field isn't just after the previous one we considered, abort.
return Err(Heterogeneous);
}
- let field = layout.field(cx, i);
-
result = result.merge(field.homogeneous_aggregate(cx)?)?;
// Keep track of the offset (without padding).
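The loop above now skips 1-ZST fields before checking that the remaining fields sit back to back and agree on a single kind. A simplified, self-contained version of that check (flat field descriptions instead of `TyAndLayout`, and only the skip/contiguity/same-kind logic, not the full `HomogeneousAggregate` merging) looks like this:
```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Kind {
    Float,
    Int,
}

#[derive(Clone, Copy)]
struct Field {
    offset: u64, // byte offset within the aggregate
    size: u64,   // byte size; a 1-ZST has size 0 (alignment elided here)
    kind: Kind,
}

/// Returns the common kind and total size if every non-1-ZST field has the same
/// kind and the data-carrying fields are packed with no gaps; otherwise the
/// aggregate is heterogeneous for ABI purposes.
fn homogeneous_aggregate(fields: &[Field]) -> Option<(Kind, u64)> {
    let mut result: Option<Kind> = None;
    let mut total = 0u64;
    for field in fields {
        if field.size == 0 {
            // 1-ZST: no data and no layout impact, ignore it.
            continue;
        }
        if field.offset != total {
            // Not directly after the previous field we considered.
            return None;
        }
        match result {
            None => result = Some(field.kind),
            Some(kind) if kind == field.kind => {}
            Some(_) => return None,
        }
        total += field.size;
    }
    result.map(|kind| (kind, total))
}

fn main() {
    let fields = [
        Field { offset: 0, size: 0, kind: Kind::Int }, // e.g. a PhantomData-like 1-ZST, skipped
        Field { offset: 0, size: 8, kind: Kind::Float },
        Field { offset: 8, size: 8, kind: Kind::Float },
    ];
    assert_eq!(homogeneous_aggregate(&fields), Some((Kind::Float, 16)));
}
```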
@@ -458,13 +526,22 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
-#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
pub layout: TyAndLayout<'a, Ty>,
pub mode: PassMode,
}
+// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
+impl<'a, Ty: fmt::Display> fmt::Debug for ArgAbi<'a, Ty> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let ArgAbi { layout, mode } = self;
+ f.debug_struct("ArgAbi").field("layout", layout).field("mode", mode).finish()
+ }
+}
+
impl<'a, Ty> ArgAbi<'a, Ty> {
+ /// This defines the "default ABI" for that type, that is then later adjusted in `fn_abi_adjust_for_abi`.
pub fn new(
cx: &impl HasDataLayout,
layout: TyAndLayout<'a, Ty>,
@@ -478,6 +555,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
),
Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+ // The `Aggregate` ABI should always be adjusted later.
Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
};
ArgAbi { layout, mode }
@@ -497,15 +575,15 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
attrs.pointee_size = layout.size;
attrs.pointee_align = Some(layout.align.abi);
- let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
+ let meta_attrs = layout.is_unsized().then_some(ArgAttributes::new());
- PassMode::Indirect { attrs, extra_attrs, on_stack: false }
+ PassMode::Indirect { attrs, meta_attrs, on_stack: false }
}
pub fn make_indirect(&mut self) {
match self.mode {
PassMode::Direct(_) | PassMode::Pair(_, _) => {}
- PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
+ PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: false } => return,
_ => panic!("Tried to make {:?} indirect", self.mode),
}
@@ -515,7 +593,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn make_indirect_byval(&mut self, byval_align: Option<Align>) {
self.make_indirect();
match self.mode {
- PassMode::Indirect { ref mut attrs, extra_attrs: _, ref mut on_stack } => {
+ PassMode::Indirect { ref mut attrs, meta_attrs: _, ref mut on_stack } => {
*on_stack = true;
// Some platforms, like 32-bit x86, change the alignment of the type when passing
@@ -548,11 +626,11 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
}
pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
- self.mode = PassMode::Cast(Box::new(target.into()), false);
+ self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32: false };
}
pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
- self.mode = PassMode::Cast(Box::new(target.into()), pad_i32);
+ self.mode = PassMode::Cast { cast: Box::new(target.into()), pad_i32 };
}
pub fn is_indirect(&self) -> bool {
@@ -560,16 +638,24 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
}
pub fn is_sized_indirect(&self) -> bool {
- matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ })
+ matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ })
}
pub fn is_unsized_indirect(&self) -> bool {
- matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ })
+ matches!(self.mode, PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ })
}
pub fn is_ignore(&self) -> bool {
matches!(self.mode, PassMode::Ignore)
}
+
+ /// Checks if these two `ArgAbi` are equal enough to be considered "the same for all
+ /// function call ABIs".
+ pub fn eq_abi(&self, other: &Self) -> bool {
+ // Ideally we'd just compare the `mode`, but that is not enough -- for some modes LLVM will look
+ // at the type.
+ self.layout.eq_abi(&other.layout) && self.mode.eq_abi(&other.mode)
+ }
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
@@ -579,10 +665,9 @@ pub enum Conv {
C,
Rust,
- /// For things unlikely to be called, where smaller caller codegen is
- /// preferred over raw speed.
- /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
- RustCold,
+ Cold,
+ PreserveMost,
+ PreserveAll,
// Target-specific calling conventions.
ArmAapcs,
@@ -605,9 +690,7 @@ pub enum Conv {
AvrInterrupt,
AvrNonBlockingInterrupt,
- RiscvInterrupt {
- kind: RiscvInterruptKind,
- },
+ RiscvInterrupt { kind: RiscvInterruptKind },
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
@@ -630,7 +713,7 @@ impl RiscvInterruptKind {
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
-#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+#[derive(Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
/// The LLVM types of each argument.
pub args: Box<[ArgAbi<'a, Ty>]>,
@@ -651,6 +734,21 @@ pub struct FnAbi<'a, Ty> {
pub can_unwind: bool,
}
+// Needs to be a custom impl because of the bounds on the `TyAndLayout` debug impl.
+impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let FnAbi { args, ret, c_variadic, fixed_count, conv, can_unwind } = self;
+ f.debug_struct("FnAbi")
+ .field("args", args)
+ .field("ret", ret)
+ .field("c_variadic", c_variadic)
+ .field("fixed_count", fixed_count)
+ .field("conv", conv)
+ .field("can_unwind", can_unwind)
+ .finish()
+ }
+}
+
/// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
#[derive(Copy, Clone, Debug, HashStable_Generic)]
pub enum AdjustForForeignAbiError {
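The hand-written `Debug` impls for `ArgAbi` and `FnAbi` above (and for `TyAndLayout` further down) exist so the `Ty` parameter only needs `Display` and is printed readably instead of through its much noisier `Debug` form. The pattern, reduced to a standalone example with an invented struct:
```rust
use std::fmt;

struct TyAndLayout<Ty> {
    ty: Ty,
    size: u64,
}

// Manual Debug impl: `Ty` only has to be `Display`, and is rendered via
// `format_args!` so the output shows the type the way a user would write it.
impl<Ty: fmt::Display> fmt::Debug for TyAndLayout<Ty> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TyAndLayout")
            .field("ty", &format_args!("{}", self.ty))
            .field("size", &self.size)
            .finish()
    }
}

fn main() {
    let tl = TyAndLayout { ty: "&'a mut [u8]", size: 16 };
    // Prints: TyAndLayout { ty: &'a mut [u8], size: 16 }
    println!("{tl:?}");
}
```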
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index d90dce2a0..93a204563 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -89,6 +89,17 @@ where
}
FieldsShape::Union(_) => {
if !arg_layout.is_zst() {
+ if arg_layout.is_transparent() {
+ let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
+ return should_use_fp_conv_helper(
+ cx,
+ &non_1zst_elem,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ );
+ }
return Err(CannotUseFpConv);
}
}
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
index 0eb2309ec..796b752ff 100644
--- a/compiler/rustc_target/src/abi/call/wasm.rs
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -61,6 +61,10 @@ where
/// The purpose of this ABI is for matching the WebAssembly standard. This
/// intentionally diverges from the C ABI and is specifically crafted to take
/// advantage of LLVM's support of multiple returns in WebAssembly.
+///
+/// This ABI is *bad*! It uses `PassMode::Direct` for `abi::Aggregate` types, which leaks LLVM
+/// implementation details into the ABI. It's just hard to fix because ABIs are hard to change.
+/// Also see <https://github.com/rust-lang/rust/issues/115666>.
pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
index b738c3133..afa1b70ef 100644
--- a/compiler/rustc_target/src/abi/call/x86.rs
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -142,13 +142,13 @@ where
for arg in fn_abi.args.iter_mut() {
let attrs = match arg.mode {
PassMode::Ignore
- | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
continue;
}
PassMode::Direct(ref mut attrs) => attrs,
PassMode::Pair(..)
- | PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
- | PassMode::Cast(..) => {
+ | PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ }
+ | PassMode::Cast { .. } => {
unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
}
};
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index dd435dbb0..74fe98920 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -3,6 +3,7 @@ pub use Primitive::*;
use crate::json::{Json, ToJson};
+use std::fmt;
use std::ops::Deref;
use rustc_macros::HashStable_Generic;
@@ -24,12 +25,22 @@ impl ToJson for Endian {
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
pub ty: Ty,
pub layout: Layout<'a>,
}
+impl<'a, Ty: fmt::Display> fmt::Debug for TyAndLayout<'a, Ty> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Print the type in a readable way, not its debug representation.
+ f.debug_struct("TyAndLayout")
+ .field("ty", &format_args!("{}", self.ty))
+ .field("layout", &self.layout)
+ .finish()
+ }
+}
+
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
type Target = &'a LayoutS;
fn deref(&self) -> &&'a LayoutS {
@@ -55,6 +66,7 @@ pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug {
fn is_never(this: TyAndLayout<'a, Self>) -> bool;
fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_transparent(this: TyAndLayout<'a, Self>) -> bool;
}
impl<'a, Ty> TyAndLayout<'a, Ty> {
@@ -125,6 +137,13 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
Ty::is_unit(self)
}
+ pub fn is_transparent<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_transparent(self)
+ }
+
pub fn offset_of_subfield<C>(self, cx: &C, indices: impl Iterator<Item = usize>) -> Size
where
Ty: TyAbiInterface<'a, C>,
@@ -144,4 +163,25 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
offset
}
+
+ /// Finds the one field that is not a 1-ZST.
+ /// Returns `None` if there are multiple non-1-ZST fields or only 1-ZST-fields.
+ pub fn non_1zst_field<C>(&self, cx: &C) -> Option<(usize, Self)>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ {
+ let mut found = None;
+ for field_idx in 0..self.fields.count() {
+ let field = self.field(cx, field_idx);
+ if field.is_1zst() {
+ continue;
+ }
+ if found.is_some() {
+ // More than one non-1-ZST field.
+ return None;
+ }
+ found = Some((field_idx, field));
+ }
+ found
+ }
}
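The `non_1zst_field` helper added just above is what the loongarch/riscv hunks rely on: it succeeds only when exactly one field actually carries data. A standalone sketch of the same search over a simplified field list (not the real `TyAndLayout` API):
```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Field {
    size: u64,
    align: u64,
}

impl Field {
    /// A "1-ZST" is a zero-sized type with alignment 1: it has no data and no
    /// influence on the layout of its container.
    fn is_1zst(&self) -> bool {
        self.size == 0 && self.align == 1
    }
}

/// Finds the one field that is not a 1-ZST.
/// Returns `None` if there are multiple non-1-ZST fields, or none at all.
fn non_1zst_field(fields: &[Field]) -> Option<(usize, Field)> {
    let mut found = None;
    for (idx, field) in fields.iter().enumerate() {
        if field.is_1zst() {
            continue;
        }
        if found.is_some() {
            // More than one field carries data: no unique answer.
            return None;
        }
        found = Some((idx, *field));
    }
    found
}

fn main() {
    let fields = [
        Field { size: 0, align: 1 }, // e.g. PhantomData<T>
        Field { size: 4, align: 4 }, // the actual payload
    ];
    assert_eq!(non_1zst_field(&fields), Some((1, Field { size: 4, align: 4 })));
    assert_eq!(non_1zst_field(&[Field { size: 0, align: 1 }]), None);
}
```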
diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs
index af455b643..c61351490 100644
--- a/compiler/rustc_target/src/json.rs
+++ b/compiler/rustc_target/src/json.rs
@@ -96,7 +96,9 @@ impl ToJson for crate::abi::call::Conv {
let s = match self {
Self::C => "C",
Self::Rust => "Rust",
- Self::RustCold => "RustCold",
+ Self::Cold => "Cold",
+ Self::PreserveMost => "PreserveMost",
+ Self::PreserveAll => "PreserveAll",
Self::ArmAapcs => "ArmAapcs",
Self::CCmseNonSecureCall => "CCmseNonSecureCall",
Self::Msp430Intr => "Msp430Intr",
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
index b52002b12..e838e1113 100644
--- a/compiler/rustc_target/src/lib.rs
+++ b/compiler/rustc_target/src/lib.rs
@@ -19,7 +19,7 @@
#![feature(step_trait)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
use std::path::{Path, PathBuf};
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
index e2df7e0bd..b29ab14e7 100644
--- a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
@@ -1,5 +1,5 @@
use super::apple_base::{opts, Arch};
-use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target, TargetOptions};
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, SanitizerSet, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "arm64-apple-ios14.0-macabi";
@@ -7,6 +7,7 @@ pub fn target() -> Target {
let arch = Arch::Arm64_macabi;
let mut base = opts("ios", arch);
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::THREAD;
Target {
llvm_target: llvm_target.into(),
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
index 550cdf6bd..a99cccd42 100644
--- a/compiler/rustc_target/src/spec/abi.rs
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -14,15 +14,33 @@ pub enum Abi {
// hashing tests. These are used in many places, so giving them stable values reduces test
// churn. The specific values are meaningless.
Rust,
- C { unwind: bool },
- Cdecl { unwind: bool },
- Stdcall { unwind: bool },
- Fastcall { unwind: bool },
- Vectorcall { unwind: bool },
- Thiscall { unwind: bool },
- Aapcs { unwind: bool },
- Win64 { unwind: bool },
- SysV64 { unwind: bool },
+ C {
+ unwind: bool,
+ },
+ Cdecl {
+ unwind: bool,
+ },
+ Stdcall {
+ unwind: bool,
+ },
+ Fastcall {
+ unwind: bool,
+ },
+ Vectorcall {
+ unwind: bool,
+ },
+ Thiscall {
+ unwind: bool,
+ },
+ Aapcs {
+ unwind: bool,
+ },
+ Win64 {
+ unwind: bool,
+ },
+ SysV64 {
+ unwind: bool,
+ },
PtxKernel,
Msp430Interrupt,
X86Interrupt,
@@ -32,11 +50,16 @@ pub enum Abi {
AvrNonBlockingInterrupt,
CCmseNonSecureCall,
Wasm,
- System { unwind: bool },
+ System {
+ unwind: bool,
+ },
RustIntrinsic,
RustCall,
PlatformIntrinsic,
Unadjusted,
+ /// For things unlikely to be called, where reducing register pressure in
+ /// `extern "Rust"` callers is worth paying extra cost in the callee.
+ /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
RustCold,
RiscvInterruptM,
RiscvInterruptS,
@@ -45,7 +68,7 @@ pub enum Abi {
impl Abi {
pub fn supports_varargs(self) -> bool {
// * C and Cdecl obviously support varargs.
- // * C can be based on SysV64 or Win64, so they must support varargs.
+ // * C can be based on Aapcs, SysV64 or Win64, so they must support varargs.
// * EfiApi is based on Win64 or C, so it also supports it.
//
// * Stdcall does not, because it would be impossible for the callee to clean
@@ -56,6 +79,7 @@ impl Abi {
match self {
Self::C { .. }
| Self::Cdecl { .. }
+ | Self::Aapcs { .. }
| Self::Win64 { .. }
| Self::SysV64 { .. }
| Self::EfiApi => true,
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
index 8a8d1ab95..7a666eea4 100644
--- a/compiler/rustc_target/src/spec/apple_base.rs
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -11,7 +11,6 @@ use Arch::*;
#[allow(non_camel_case_types)]
#[derive(Copy, Clone)]
pub enum Arch {
- Armv7,
Armv7k,
Armv7s,
Arm64,
@@ -29,7 +28,6 @@ pub enum Arch {
impl Arch {
pub fn target_name(self) -> &'static str {
match self {
- Armv7 => "armv7",
Armv7k => "armv7k",
Armv7s => "armv7s",
Arm64 | Arm64_macabi | Arm64_sim => "arm64",
@@ -43,7 +41,7 @@ impl Arch {
pub fn target_arch(self) -> Cow<'static, str> {
Cow::Borrowed(match self {
- Armv7 | Armv7k | Armv7s => "arm",
+ Armv7k | Armv7s => "arm",
Arm64 | Arm64_32 | Arm64_macabi | Arm64_sim => "aarch64",
I386 | I686 => "x86",
X86_64 | X86_64_sim | X86_64_macabi | X86_64h => "x86_64",
@@ -52,7 +50,7 @@ impl Arch {
fn target_abi(self) -> &'static str {
match self {
- Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 | X86_64h => "",
+ Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 | X86_64h => "",
X86_64_macabi | Arm64_macabi => "macabi",
// x86_64-apple-ios is a simulator target, even though it isn't
// declared that way in the target like the other ones...
@@ -62,18 +60,20 @@ impl Arch {
fn target_cpu(self) -> &'static str {
match self {
- Armv7 => "cortex-a8", // iOS7 is supported on iPhone 4 and higher
Armv7k => "cortex-a8",
- Armv7s => "cortex-a9",
+ Armv7s => "swift", // iOS 10 is only supported on iPhone 5 or higher.
Arm64 => "apple-a7",
Arm64_32 => "apple-s4",
- I386 | I686 => "yonah",
- X86_64 | X86_64_sim => "core2",
+ // Only macOS 10.12+ is supported, which means
+ // all x86_64/x86 CPUs must be running at least penryn
+ // https://github.com/llvm/llvm-project/blob/01f924d0e37a5deae51df0d77e10a15b63aa0c0f/clang/lib/Driver/ToolChains/Arch/X86.cpp#L79-L82
+ I386 | I686 => "penryn",
+ X86_64 | X86_64_sim => "penryn",
+ X86_64_macabi => "penryn",
// Note: `core-avx2` is slightly more advanced than `x86_64h`, see
// comments (and disabled features) in `x86_64h_apple_darwin` for
- // details.
+            // details. It is a higher baseline than `penryn` however.
X86_64h => "core-avx2",
- X86_64_macabi => "core2",
Arm64_macabi => "apple-a12",
Arm64_sim => "apple-a12",
}
@@ -115,21 +115,6 @@ fn pre_link_args(os: &'static str, arch: Arch, abi: &'static str) -> LinkArgs {
}
pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
- // Static TLS is only available in macOS 10.7+. If you try to compile for 10.6
- // either the linker will complain if it is used or the binary will end up
- // segfaulting at runtime when run on 10.6. Rust by default supports macOS
- // 10.7+, but there is a standard environment variable,
- // MACOSX_DEPLOYMENT_TARGET, which is used to signal targeting older
- // versions of macOS. For example compiling on 10.10 with
- // MACOSX_DEPLOYMENT_TARGET set to 10.6 will cause the linker to generate
- // warnings about the usage of static TLS.
- //
- // Here we detect what version is being requested, defaulting to 10.7. Static
- // TLS is flagged as enabled if it looks to be supported. The architecture
- // only matters for default deployment target which is 11.0 for ARM64 and
- // 10.7 for everything else.
- let has_thread_local = os == "macos" && macos_deployment_target(Arch::X86_64) >= (10, 7);
-
let abi = arch.target_abi();
TargetOptions {
@@ -145,12 +130,17 @@ pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
pre_link_args: pre_link_args(os, arch, abi),
families: cvs!["unix"],
is_like_osx: true,
- default_dwarf_version: 2,
+ // LLVM notes that macOS 10.11+ and iOS 9+ default
+ // to v4, so we do the same.
+ // https://github.com/llvm/llvm-project/blob/378778a0d10c2f8d5df8ceff81f95b6002984a4b/clang/lib/Driver/ToolChains/Darwin.cpp#L1203
+ default_dwarf_version: 4,
frame_pointer: FramePointer::Always,
has_rpath: true,
dll_suffix: ".dylib".into(),
archive_format: "darwin".into(),
- has_thread_local,
+ // Thread locals became available with iOS 8 and macOS 10.7,
+ // and both are far below our minimum.
+ has_thread_local: true,
abi_return_struct_as_int: true,
emit_debug_gdb_scripts: false,
eh_frame_header: false,
@@ -179,20 +169,52 @@ pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
}
}
-pub fn deployment_target(target: &Target) -> Option<String> {
+pub fn sdk_version(platform: u32) -> Option<(u32, u32)> {
+ // NOTE: These values are from an arbitrary point in time but shouldn't make it into the final
+ // binary since the final link command will have the current SDK version passed to it.
+ match platform {
+ object::macho::PLATFORM_MACOS => Some((13, 1)),
+ object::macho::PLATFORM_IOS
+ | object::macho::PLATFORM_IOSSIMULATOR
+ | object::macho::PLATFORM_TVOS
+ | object::macho::PLATFORM_TVOSSIMULATOR
+ | object::macho::PLATFORM_MACCATALYST => Some((16, 2)),
+ object::macho::PLATFORM_WATCHOS | object::macho::PLATFORM_WATCHOSSIMULATOR => Some((9, 1)),
+ _ => None,
+ }
+}
+
+pub fn platform(target: &Target) -> Option<u32> {
+ Some(match (&*target.os, &*target.abi) {
+ ("macos", _) => object::macho::PLATFORM_MACOS,
+ ("ios", "macabi") => object::macho::PLATFORM_MACCATALYST,
+ ("ios", "sim") => object::macho::PLATFORM_IOSSIMULATOR,
+ ("ios", _) => object::macho::PLATFORM_IOS,
+ ("watchos", "sim") => object::macho::PLATFORM_WATCHOSSIMULATOR,
+ ("watchos", _) => object::macho::PLATFORM_WATCHOS,
+ ("tvos", "sim") => object::macho::PLATFORM_TVOSSIMULATOR,
+ ("tvos", _) => object::macho::PLATFORM_TVOS,
+ _ => return None,
+ })
+}
+
+pub fn deployment_target(target: &Target) -> Option<(u32, u32)> {
let (major, minor) = match &*target.os {
"macos" => {
// This does not need to be specific. It just needs to handle x86 vs M1.
let arch = if target.arch == "x86" || target.arch == "x86_64" { X86_64 } else { Arm64 };
macos_deployment_target(arch)
}
- "ios" => ios_deployment_target(),
+ "ios" => match &*target.abi {
+ "macabi" => mac_catalyst_deployment_target(),
+ _ => ios_deployment_target(),
+ },
"watchos" => watchos_deployment_target(),
"tvos" => tvos_deployment_target(),
_ => return None,
};
- Some(format!("{major}.{minor}"))
+ Some((major, minor))
}
fn from_set_deployment_target(var_name: &str) -> Option<(u32, u32)> {
@@ -207,9 +229,7 @@ fn macos_default_deployment_target(arch: Arch) -> (u32, u32) {
match arch {
// Note: Arm64_sim is not included since macOS has no simulator.
Arm64 | Arm64_macabi => (11, 0),
- // x86_64h-apple-darwin only supports macOS 10.8 and later
- X86_64h => (10, 8),
- _ => (10, 7),
+ _ => (10, 12),
}
}
@@ -260,8 +280,8 @@ fn link_env_remove(arch: Arch, os: &'static str) -> StaticCow<[StaticCow<str>]>
// Otherwise if cross-compiling for a different OS/SDK, remove any part
// of the linking environment that's wrong and reversed.
match arch {
- Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 | X86_64_sim
- | X86_64h | Arm64_sim => {
+ Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | I686 | X86_64 | X86_64_sim | X86_64h
+ | Arm64_sim => {
cvs!["MACOSX_DEPLOYMENT_TARGET"]
}
X86_64_macabi | Arm64_macabi => cvs!["IPHONEOS_DEPLOYMENT_TARGET"],
@@ -271,7 +291,12 @@ fn link_env_remove(arch: Arch, os: &'static str) -> StaticCow<[StaticCow<str>]>
fn ios_deployment_target() -> (u32, u32) {
// If you are looking for the default deployment target, prefer `rustc --print deployment-target`.
- from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
+ from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((10, 0))
+}
+
+fn mac_catalyst_deployment_target() -> (u32, u32) {
+ // If you are looking for the default deployment target, prefer `rustc --print deployment-target`.
+ from_set_deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((14, 0))
}
pub fn ios_llvm_target(arch: Arch) -> String {
@@ -297,7 +322,7 @@ pub fn ios_sim_llvm_target(arch: Arch) -> String {
fn tvos_deployment_target() -> (u32, u32) {
// If you are looking for the default deployment target, prefer `rustc --print deployment-target`.
- from_set_deployment_target("TVOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
+ from_set_deployment_target("TVOS_DEPLOYMENT_TARGET").unwrap_or((10, 0))
}
fn tvos_lld_platform_version() -> String {
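`deployment_target` now returns the `(major, minor)` pair directly, reading per-platform environment variables and falling back to the per-OS defaults updated in this hunk. A simplified sketch of that parse-with-fallback (the helper name is hypothetical; it only mirrors the shape of `from_set_deployment_target`):
```rust
use std::env;

/// Parse a "MAJOR.MINOR" deployment target from an environment variable,
/// falling back to `default` when the variable is unset or malformed.
/// (The real code distinguishes unset from malformed; this sketch does not.)
fn deployment_target_from_env(var_name: &str, default: (u32, u32)) -> (u32, u32) {
    let parse = |value: String| -> Option<(u32, u32)> {
        let (major, minor) = value.split_once('.')?;
        Some((major.parse().ok()?, minor.parse().ok()?))
    };
    env::var(var_name).ok().and_then(parse).unwrap_or(default)
}

fn main() {
    // With IPHONEOS_DEPLOYMENT_TARGET unset, this yields the new iOS default of 10.0.
    let ios = deployment_target_from_env("IPHONEOS_DEPLOYMENT_TARGET", (10, 0));
    println!("targeting iOS {}.{}", ios.0, ios.1);
}
```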
diff --git a/compiler/rustc_target/src/spec/armv7_apple_ios.rs b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
deleted file mode 100644
index 3259c8547..000000000
--- a/compiler/rustc_target/src/spec/armv7_apple_ios.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use super::apple_base::{ios_llvm_target, opts, Arch};
-use crate::spec::{Target, TargetOptions};
-
-pub fn target() -> Target {
- let arch = Arch::Armv7;
- Target {
- // Clang automatically chooses a more specific target based on
- // IPHONEOS_DEPLOYMENT_TARGET.
- // This is required for the target to pick the right
- // MACH-O commands, so we do too.
- llvm_target: ios_llvm_target(arch).into(),
- pointer_width: 32,
- data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
- arch: arch.target_arch(),
- options: TargetOptions {
- features: "+v7,+vfp3,+neon".into(),
- max_atomic_width: Some(64),
- ..opts("ios", arch)
- },
- }
-}
diff --git a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
index be4bc6758..be7f8542c 100644
--- a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
+++ b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
@@ -1,10 +1,10 @@
-use super::apple_base::{opts, Arch};
+use super::apple_base::{ios_llvm_target, opts, Arch};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
let arch = Arch::Armv7s;
Target {
- llvm_target: "armv7s-apple-ios".into(),
+ llvm_target: ios_llvm_target(arch).into(),
pointer_width: 32,
data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
arch: arch.target_arch(),
diff --git a/compiler/rustc_target/src/spec/hurd_base.rs b/compiler/rustc_target/src/spec/hurd_base.rs
new file mode 100644
index 000000000..76f8223c0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/hurd_base.rs
@@ -0,0 +1,15 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "hurd".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ has_thread_local: true,
+ crt_static_respected: true,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/hurd_gnu_base.rs b/compiler/rustc_target/src/spec/hurd_gnu_base.rs
new file mode 100644
index 000000000..b9cf26d93
--- /dev/null
+++ b/compiler/rustc_target/src/spec/hurd_gnu_base.rs
@@ -0,0 +1,5 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ TargetOptions { env: "gnu".into(), ..super::hurd_base::opts() }
+}
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/i686_pc_windows_gnullvm.rs
new file mode 100644
index 000000000..3154b512a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_gnullvm.rs
@@ -0,0 +1,26 @@
+use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_gnullvm_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.frame_pointer = FramePointer::Always; // Required for backtraces
+ base.linker = Some("i686-w64-mingw32-clang".into());
+
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ base.add_pre_link_args(
+ LinkerFlavor::Gnu(Cc::No, Lld::No),
+ &["-m", "i386pe", "--large-address-aware"],
+ );
+
+ Target {
+ llvm_target: "i686-pc-windows-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_hurd_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_hurd_gnu.rs
new file mode 100644
index 000000000..29f803601
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_hurd_gnu.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::hurd_gnu_base::opts();
+ base.cpu = "pentiumpro".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]);
+ base.stack_probes = StackProbeType::InlineOrCall { min_llvm_version_for_inline: (11, 0, 1) };
+
+ Target {
+ llvm_target: "i686-unknown-hurd-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index 31b6961bb..1bcb1f353 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -61,6 +61,8 @@ mod aix_base;
mod android_base;
mod apple_base;
pub use apple_base::deployment_target as current_apple_deployment_target;
+pub use apple_base::platform as current_apple_platform;
+pub use apple_base::sdk_version as current_apple_sdk_version;
mod avr_gnu_base;
pub use avr_gnu_base::ef_avr_arch;
mod bpf_base;
@@ -69,6 +71,8 @@ mod freebsd_base;
mod fuchsia_base;
mod haiku_base;
mod hermit_base;
+mod hurd_base;
+mod hurd_gnu_base;
mod illumos_base;
mod l4re_base;
mod linux_base;
@@ -1365,6 +1369,8 @@ supported_targets! {
("i686-unknown-haiku", i686_unknown_haiku),
("x86_64-unknown-haiku", x86_64_unknown_haiku),
+ ("i686-unknown-hurd-gnu", i686_unknown_hurd_gnu),
+
("aarch64-apple-darwin", aarch64_apple_darwin),
("x86_64-apple-darwin", x86_64_apple_darwin),
("x86_64h-apple-darwin", x86_64h_apple_darwin),
@@ -1388,7 +1394,6 @@ supported_targets! {
("i386-apple-ios", i386_apple_ios),
("x86_64-apple-ios", x86_64_apple_ios),
("aarch64-apple-ios", aarch64_apple_ios),
- ("armv7-apple-ios", armv7_apple_ios),
("armv7s-apple-ios", armv7s_apple_ios),
("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
@@ -1418,6 +1423,7 @@ supported_targets! {
("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu),
("aarch64-pc-windows-gnullvm", aarch64_pc_windows_gnullvm),
+ ("i686-pc-windows-gnullvm", i686_pc_windows_gnullvm),
("x86_64-pc-windows-gnullvm", x86_64_pc_windows_gnullvm),
("aarch64-pc-windows-msvc", aarch64_pc_windows_msvc),
@@ -2276,6 +2282,13 @@ impl Target {
Abi::Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi,
Abi::Fastcall { unwind } | Abi::Vectorcall { unwind } => Abi::C { unwind },
+ // The Windows x64 calling convention we use for `extern "Rust"`
+ // <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation>
+ // expects the callee to save `xmm6` through `xmm15`, but `PreserveMost`
+ // (that we use by default for `extern "rust-cold"`) doesn't save any of those.
+ // So to avoid bloating callers, just use the Rust convention here.
+ Abi::RustCold if self.is_like_windows && self.arch == "x86_64" => Abi::Rust,
+
abi => abi,
}
}
diff --git a/compiler/rustc_target/src/spec/riscv64_linux_android.rs b/compiler/rustc_target/src/spec/riscv64_linux_android.rs
index af0d68554..91f5e562d 100644
--- a/compiler/rustc_target/src/spec/riscv64_linux_android.rs
+++ b/compiler/rustc_target/src/spec/riscv64_linux_android.rs
@@ -9,7 +9,7 @@ pub fn target() -> Target {
options: TargetOptions {
code_model: Some(CodeModel::Medium),
cpu: "generic-rv64".into(),
- features: "+m,+a,+f,+d,+c".into(),
+ features: "+m,+a,+f,+d,+c,+Zba,+Zbb,+Zbs".into(),
llvm_abiname: "lp64d".into(),
supported_sanitizers: SanitizerSet::ADDRESS,
max_atomic_width: Some(64),
diff --git a/compiler/rustc_target/src/spec/uefi_msvc_base.rs b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
index 8968d3c8f..a50a55ad7 100644
--- a/compiler/rustc_target/src/spec/uefi_msvc_base.rs
+++ b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
@@ -46,6 +46,7 @@ pub fn opts() -> TargetOptions {
stack_probes: StackProbeType::Call,
singlethread: true,
linker: Some("rust-lld".into()),
+ entry_name: "efi_main".into(),
..base
}
}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
index e90bda9c9..e3f5d7321 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -5,7 +5,7 @@ use crate::spec::{StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let arch = Arch::X86_64;
let mut base = opts("macos", arch);
- base.max_atomic_width = Some(128); // core2 supports cmpxchg16b
+ base.max_atomic_width = Some(128); // penryn+ supports cmpxchg16b
base.frame_pointer = FramePointer::Always;
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-m64"]);
base.stack_probes = StackProbeType::X86;
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
index 50f359c35..fd1926f29 100644
--- a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -1,5 +1,5 @@
use super::apple_base::{opts, Arch};
-use crate::spec::{Cc, LinkerFlavor, Lld, StackProbeType, Target, TargetOptions};
+use crate::spec::{Cc, LinkerFlavor, Lld, SanitizerSet, StackProbeType, Target, TargetOptions};
pub fn target() -> Target {
let llvm_target = "x86_64-apple-ios14.0-macabi";
@@ -7,6 +7,7 @@ pub fn target() -> Target {
let arch = Arch::X86_64_macabi;
let mut base = opts("ios", arch);
base.add_pre_link_args(LinkerFlavor::Darwin(Cc::Yes, Lld::No), &["-target", llvm_target]);
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::THREAD;
Target {
llvm_target: llvm_target.into(),
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
index 67664a747..41ba76806 100644
--- a/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
@@ -5,13 +5,14 @@
// The win64 ABI is used. It differs from the sysv64 ABI, so we must use a windows target with
// LLVM. "x86_64-unknown-windows" is used to get the minimal subset of windows-specific features.
-use crate::spec::Target;
+use crate::{abi::call::Conv, spec::Target};
pub fn target() -> Target {
let mut base = super::uefi_msvc_base::opts();
base.cpu = "x86-64".into();
base.plt_by_default = false;
base.max_atomic_width = Some(64);
+ base.entry_abi = Conv::X86_64Win64;
// We disable MMX and SSE for now, even though UEFI allows using them. Problem is, you have to
// enable these CPU features explicitly before their first use, otherwise their instructions
diff --git a/compiler/rustc_trait_selection/messages.ftl b/compiler/rustc_trait_selection/messages.ftl
index f4c9dfa34..20253b32a 100644
--- a/compiler/rustc_trait_selection/messages.ftl
+++ b/compiler/rustc_trait_selection/messages.ftl
@@ -27,6 +27,8 @@ trait_selection_inherent_projection_normalization_overflow = overflow evaluating
trait_selection_invalid_on_clause_in_rustc_on_unimplemented = invalid `on`-clause in `#[rustc_on_unimplemented]`
.label = invalid on-clause here
+trait_selection_malformed_on_unimplemented_attr = malformed `on_unimplemented` attribute
+
trait_selection_negative_positive_conflict = found both positive and negative implementation of trait `{$trait_desc}`{$self_desc ->
[none] {""}
*[default] {" "}for type `{$self_desc}`
@@ -40,4 +42,7 @@ trait_selection_no_value_in_rustc_on_unimplemented = this attribute must have a
.label = expected value here
.note = eg `#[rustc_on_unimplemented(message="foo")]`
+trait_selection_trait_has_no_impls = this trait has no implementations, consider adding one
+
+trait_selection_ty_alias_overflow = in case this is a recursive type alias, consider using a struct, enum, or union instead
trait_selection_unable_to_construct_constant_value = unable to construct a constant value for the unevaluated constant {$unevaluated}
diff --git a/compiler/rustc_trait_selection/src/solve/alias_relate.rs b/compiler/rustc_trait_selection/src/solve/alias_relate.rs
index 6b839d64b..f7031c5f4 100644
--- a/compiler/rustc_trait_selection/src/solve/alias_relate.rs
+++ b/compiler/rustc_trait_selection/src/solve/alias_relate.rs
@@ -125,7 +125,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
direction: ty::AliasRelationDirection,
invert: Invert,
) -> QueryResult<'tcx> {
- self.probe_candidate("normalizes-to").enter(|ecx| {
+ self.probe_misc_candidate("normalizes-to").enter(|ecx| {
ecx.normalizes_to_inner(param_env, alias, other, direction, invert)?;
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
@@ -175,7 +175,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
alias_rhs: ty::AliasTy<'tcx>,
direction: ty::AliasRelationDirection,
) -> QueryResult<'tcx> {
- self.probe_candidate("args relate").enter(|ecx| {
+ self.probe_misc_candidate("args relate").enter(|ecx| {
match direction {
ty::AliasRelationDirection::Equate => {
ecx.eq(param_env, alias_lhs, alias_rhs)?;
@@ -196,7 +196,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
rhs: ty::Term<'tcx>,
direction: ty::AliasRelationDirection,
) -> QueryResult<'tcx> {
- self.probe_candidate("bidir normalizes-to").enter(|ecx| {
+ self.probe_misc_candidate("bidir normalizes-to").enter(|ecx| {
ecx.normalizes_to_inner(
param_env,
lhs.to_alias_ty(ecx.tcx()).unwrap(),
diff --git a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
index 36194f973..23d2c0c4e 100644
--- a/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/assembly/mod.rs
@@ -5,8 +5,10 @@ use crate::traits::coherence;
use rustc_hir::def_id::DefId;
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::Reveal;
-use rustc_middle::traits::solve::inspect::CandidateKind;
-use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
+use rustc_middle::traits::solve::inspect::ProbeKind;
+use rustc_middle::traits::solve::{
+ CandidateSource, CanonicalResponse, Certainty, Goal, QueryResult,
+};
use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::fast_reject::{SimplifiedType, TreatParams};
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -27,66 +29,6 @@ pub(super) struct Candidate<'tcx> {
pub(super) result: CanonicalResponse<'tcx>,
}
-/// Possible ways the given goal can be proven.
-#[derive(Debug, Clone, Copy)]
-pub(super) enum CandidateSource {
- /// A user written impl.
- ///
- /// ## Examples
- ///
- /// ```rust
- /// fn main() {
- /// let x: Vec<u32> = Vec::new();
- /// // This uses the impl from the standard library to prove `Vec<T>: Clone`.
- /// let y = x.clone();
- /// }
- /// ```
- Impl(DefId),
- /// A builtin impl generated by the compiler. When adding a new special
- /// trait, try to use actual impls whenever possible. Builtin impls should
- /// only be used in cases where the impl cannot be manually be written.
- ///
- /// Notable examples are auto traits, `Sized`, and `DiscriminantKind`.
- /// For a list of all traits with builtin impls, check out the
- /// [`EvalCtxt::assemble_builtin_impl_candidates`] method. Not
- BuiltinImpl(BuiltinImplSource),
- /// An assumption from the environment.
- ///
- /// More precisely we've used the `n-th` assumption in the `param_env`.
- ///
- /// ## Examples
- ///
- /// ```rust
- /// fn is_clone<T: Clone>(x: T) -> (T, T) {
- /// // This uses the assumption `T: Clone` from the `where`-bounds
- /// // to prove `T: Clone`.
- /// (x.clone(), x)
- /// }
- /// ```
- ParamEnv(usize),
- /// If the self type is an alias type, e.g. an opaque type or a projection,
- /// we know the bounds on that alias to hold even without knowing its concrete
- /// underlying type.
- ///
- /// More precisely this candidate is using the `n-th` bound in the `item_bounds` of
- /// the self type.
- ///
- /// ## Examples
- ///
- /// ```rust
- /// trait Trait {
- /// type Assoc: Clone;
- /// }
- ///
- /// fn foo<T: Trait>(x: <T as Trait>::Assoc) {
- /// // We prove `<T as Trait>::Assoc` by looking at the bounds on `Assoc` in
- /// // in the trait definition.
- /// let _y = x.clone();
- /// }
- /// ```
- AliasBound,
-}
-
/// Methods used to assemble candidates for either trait or projection goals.
pub(super) trait GoalKind<'tcx>:
TypeFoldable<TyCtxt<'tcx>> + Copy + Eq + std::fmt::Display
@@ -399,7 +341,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
let tcx = self.tcx();
let &ty::Alias(_, projection_ty) = goal.predicate.self_ty().kind() else { return };
- candidates.extend(self.probe(|_| CandidateKind::NormalizedSelfTyAssembly).enter(|ecx| {
+ candidates.extend(self.probe(|_| ProbeKind::NormalizedSelfTyAssembly).enter(|ecx| {
if num_steps < ecx.local_overflow_limit() {
let normalized_ty = ecx.next_ty_infer();
let normalizes_to_goal = goal.with(
@@ -527,7 +469,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
// FIXME: These should ideally not exist as a self type. It would be nice for
// the builtin auto trait impls of generators to instead directly recurse
// into the witness.
- ty::GeneratorWitness(_) | ty::GeneratorWitnessMIR(_, _) => (),
+ ty::GeneratorWitness(..) => (),
// These variants should not exist as a self type.
ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_))
@@ -679,8 +621,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
| ty::Dynamic(..)
| ty::Closure(..)
| ty::Generator(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Param(_)
@@ -836,8 +777,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
| ty::Alias(..)
| ty::Closure(..)
| ty::Generator(..)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Param(_)
@@ -910,7 +850,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
SolverMode::Coherence => {}
};
- let result = self.probe_candidate("coherence unknowable").enter(|ecx| {
+ let result = self.probe_misc_candidate("coherence unknowable").enter(|ecx| {
let trait_ref = goal.predicate.trait_ref(tcx);
#[derive(Debug)]
diff --git a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
index c47767101..16f288045 100644
--- a/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
+++ b/compiler/rustc_trait_selection/src/solve/assembly/structural_traits.rs
@@ -14,6 +14,7 @@ use crate::solve::EvalCtxt;
//
// For types with an "existential" binder, i.e. generator witnesses, we also
// instantiate the binder with placeholders eagerly.
+#[instrument(level = "debug", skip(ecx), ret)]
pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<'tcx>(
ecx: &EvalCtxt<'_, 'tcx>,
ty: Ty<'tcx>,
@@ -61,9 +62,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<'tcx>(
Ok(vec![generator_args.tupled_upvars_ty(), generator_args.witness()])
}
- ty::GeneratorWitness(types) => Ok(ecx.instantiate_binder_with_placeholders(types).to_vec()),
-
- ty::GeneratorWitnessMIR(def_id, args) => Ok(ecx
+ ty::GeneratorWitness(def_id, args) => Ok(ecx
.tcx()
.generator_hidden_types(def_id)
.map(|bty| {
@@ -96,8 +95,7 @@ pub(in crate::solve) fn replace_erased_lifetimes_with_bound_vars<'tcx>(
let mut counter = 0;
let ty = tcx.fold_regions(ty, |r, current_depth| match r.kind() {
ty::ReErased => {
- let br =
- ty::BoundRegion { var: ty::BoundVar::from_u32(counter), kind: ty::BrAnon(None) };
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(counter), kind: ty::BrAnon };
counter += 1;
ty::Region::new_late_bound(tcx, current_depth, br)
}
@@ -105,11 +103,12 @@ pub(in crate::solve) fn replace_erased_lifetimes_with_bound_vars<'tcx>(
r => bug!("unexpected region: {r:?}"),
});
let bound_vars = tcx.mk_bound_variable_kinds_from_iter(
- (0..counter).map(|_| ty::BoundVariableKind::Region(ty::BrAnon(None))),
+ (0..counter).map(|_| ty::BoundVariableKind::Region(ty::BrAnon)),
);
ty::Binder::bind_with_vars(ty, bound_vars)
}
+#[instrument(level = "debug", skip(ecx), ret)]
pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<'tcx>(
ecx: &EvalCtxt<'_, 'tcx>,
ty: Ty<'tcx>,
@@ -127,7 +126,6 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<'tcx>(
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
@@ -156,6 +154,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<'tcx>(
}
}
+#[instrument(level = "debug", skip(ecx), ret)]
pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>(
ecx: &EvalCtxt<'_, 'tcx>,
ty: Ty<'tcx>,
@@ -204,9 +203,7 @@ pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<'tcx>(
}
}
- ty::GeneratorWitness(types) => Ok(ecx.instantiate_binder_with_placeholders(types).to_vec()),
-
- ty::GeneratorWitnessMIR(def_id, args) => Ok(ecx
+ ty::GeneratorWitness(def_id, args) => Ok(ecx
.tcx()
.generator_hidden_types(def_id)
.map(|bty| {
@@ -282,8 +279,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>(
| ty::Ref(_, _, _)
| ty::Dynamic(_, _, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Alias(_, _)
@@ -354,6 +350,12 @@ pub(in crate::solve) fn predicates_for_object_candidate<'tcx>(
// FIXME(associated_const_equality): Also add associated consts to
// the requirements here.
if item.kind == ty::AssocKind::Type {
+ // associated types that require `Self: Sized` do not show up in the built-in
+ // implementation of `Trait for dyn Trait`, and can be dropped here.
+ if tcx.generics_require_sized_self(item.def_id) {
+ continue;
+ }
+
requirements
.extend(tcx.item_bounds(item.def_id).iter_instantiated(tcx, trait_ref.args));
}
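
The new `continue` relies on `generics_require_sized_self`: associated types that are only usable on `Sized` receivers are left out of the object candidate's requirements entirely. A small sketch of the surface behaviour this corresponds to (names are illustrative, and it assumes the rule the comment describes, namely that such associated types need not be specified in `dyn Trait`):

    trait Trait {
        // Requires `Self: Sized`, so it is skipped by the
        // `generics_require_sized_self` check above and does not have to be
        // specified when writing `dyn Trait`.
        type OnlySized: Clone
        where
            Self: Sized;

        fn describe(&self) -> &'static str;
    }

    // Compiles: `dyn Trait` never needs a value for `OnlySized` here.
    fn use_object(x: &dyn Trait) -> &'static str {
        x.describe()
    }
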
diff --git a/compiler/rustc_trait_selection/src/solve/canonicalize.rs b/compiler/rustc_trait_selection/src/solve/canonicalize.rs
index a9d182abf..aa92b924e 100644
--- a/compiler/rustc_trait_selection/src/solve/canonicalize.rs
+++ b/compiler/rustc_trait_selection/src/solve/canonicalize.rs
@@ -269,7 +269,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
self.primitive_var_infos.push(CanonicalVarInfo { kind });
var
});
- let br = ty::BoundRegion { var, kind: BrAnon(None) };
+ let br = ty::BoundRegion { var, kind: BrAnon };
ty::Region::new_late_bound(self.interner(), self.binder_index, br)
}
@@ -330,8 +330,7 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Alias(_, _)
@@ -365,6 +364,17 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for Canonicalizer<'_, 'tcx> {
// FIXME: we should fold this ty eventually
CanonicalVarKind::Const(ui, c.ty())
}
+ ty::ConstKind::Infer(ty::InferConst::EffectVar(vid)) => {
+ assert_eq!(
+ self.infcx.root_effect_var(vid),
+ vid,
+ "effect var should have been resolved"
+ );
+ let None = self.infcx.probe_effect_var(vid) else {
+ bug!("effect var should have been resolved");
+ };
+ CanonicalVarKind::Effect
+ }
ty::ConstKind::Infer(ty::InferConst::Fresh(_)) => {
bug!("fresh var during canonicalization: {c:?}")
}
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
index 5c2cbe399..066129d8e 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt.rs
@@ -28,8 +28,8 @@ use std::ops::ControlFlow;
use crate::traits::vtable::{count_own_vtable_entries, prepare_vtable_segments, VtblSegment};
use super::inspect::ProofTreeBuilder;
-use super::search_graph;
use super::SolverMode;
+use super::{search_graph, GoalEvaluationKind};
use super::{search_graph::SearchGraph, Goal};
pub use select::InferCtxtSelectExt;
@@ -85,7 +85,7 @@ pub struct EvalCtxt<'a, 'tcx> {
// evaluation code.
tainted: Result<(), NoSolution>,
- inspect: ProofTreeBuilder<'tcx>,
+ pub(super) inspect: ProofTreeBuilder<'tcx>,
}
#[derive(Debug, Clone)]
@@ -164,7 +164,7 @@ impl<'tcx> InferCtxtEvalExt<'tcx> for InferCtxt<'tcx> {
Option<inspect::GoalEvaluation<'tcx>>,
) {
EvalCtxt::enter_root(self, generate_proof_tree, |ecx| {
- ecx.evaluate_goal(IsNormalizesToHack::No, goal)
+ ecx.evaluate_goal(GoalEvaluationKind::Root, goal)
})
}
}
@@ -237,7 +237,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
search_graph: &'a mut search_graph::SearchGraph<'tcx>,
canonical_input: CanonicalInput<'tcx>,
- goal_evaluation: &mut ProofTreeBuilder<'tcx>,
+ canonical_goal_evaluation: &mut ProofTreeBuilder<'tcx>,
f: impl FnOnce(&mut EvalCtxt<'_, 'tcx>, Goal<'tcx, ty::Predicate<'tcx>>) -> R,
) -> R {
let intercrate = match search_graph.solver_mode() {
@@ -260,7 +260,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
search_graph,
nested_goals: NestedGoals::new(),
tainted: Ok(()),
- inspect: goal_evaluation.new_goal_evaluation_step(input),
+ inspect: canonical_goal_evaluation.new_goal_evaluation_step(input),
};
for &(key, ty) in &input.predefined_opaques_in_body.opaque_types {
@@ -274,7 +274,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
let result = f(&mut ecx, input.goal);
- goal_evaluation.goal_evaluation_step(ecx.inspect);
+ canonical_goal_evaluation.goal_evaluation_step(ecx.inspect);
// When creating a query response we clone the opaque type constraints
// instead of taking them. This would cause an ICE here, since we have
@@ -302,24 +302,25 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
search_graph: &'a mut search_graph::SearchGraph<'tcx>,
canonical_input: CanonicalInput<'tcx>,
- mut goal_evaluation: &mut ProofTreeBuilder<'tcx>,
+ goal_evaluation: &mut ProofTreeBuilder<'tcx>,
) -> QueryResult<'tcx> {
- goal_evaluation.canonicalized_goal(canonical_input);
+ let mut canonical_goal_evaluation =
+ goal_evaluation.new_canonical_goal_evaluation(canonical_input);
// Deal with overflow, caching, and coinduction.
//
// The actual solver logic happens in `ecx.compute_goal`.
- ensure_sufficient_stack(|| {
+ let result = ensure_sufficient_stack(|| {
search_graph.with_new_goal(
tcx,
canonical_input,
- goal_evaluation,
- |search_graph, goal_evaluation| {
+ &mut canonical_goal_evaluation,
+ |search_graph, canonical_goal_evaluation| {
EvalCtxt::enter_canonical(
tcx,
search_graph,
canonical_input,
- goal_evaluation,
+ canonical_goal_evaluation,
|ecx, goal| {
let result = ecx.compute_goal(goal);
ecx.inspect.query_result(result);
@@ -328,18 +329,23 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
)
},
)
- })
+ });
+
+ canonical_goal_evaluation.query_result(result);
+ goal_evaluation.canonical_goal_evaluation(canonical_goal_evaluation);
+ result
}
/// Recursively evaluates `goal`, returning whether any inference vars have
/// been constrained and the certainty of the result.
fn evaluate_goal(
&mut self,
- is_normalizes_to_hack: IsNormalizesToHack,
+ goal_evaluation_kind: GoalEvaluationKind,
goal: Goal<'tcx, ty::Predicate<'tcx>>,
) -> Result<(bool, Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
let (orig_values, canonical_goal) = self.canonicalize_goal(goal);
- let mut goal_evaluation = self.inspect.new_goal_evaluation(goal, is_normalizes_to_hack);
+ let mut goal_evaluation =
+ self.inspect.new_goal_evaluation(goal, &orig_values, goal_evaluation_kind);
let encountered_overflow = self.search_graph.encountered_overflow();
let canonical_response = EvalCtxt::evaluate_canonical_goal(
self.tcx(),
@@ -347,7 +353,6 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
canonical_goal,
&mut goal_evaluation,
);
- goal_evaluation.query_result(canonical_response);
let canonical_response = match canonical_response {
Err(e) => {
self.inspect.goal_evaluation(goal_evaluation);
@@ -385,7 +390,10 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
// solver cycle.
if cfg!(debug_assertions)
&& has_changed
- && is_normalizes_to_hack == IsNormalizesToHack::No
+ && !matches!(
+ goal_evaluation_kind,
+ GoalEvaluationKind::Nested { is_normalizes_to_hack: IsNormalizesToHack::Yes }
+ )
&& !self.search_graph.in_cycle()
{
// The nested evaluation has to happen with the original state
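
The `GoalEvaluationKind` threaded through `evaluate_goal` above replaces the bare `IsNormalizesToHack` flag. Its definition is not part of this hunk (it is imported from `super`, i.e. `solve/mod.rs`), but the match arms here and the conversion in `inspect/build.rs` below suggest a shape roughly like the following sketch; the derives are assumed:

    // Sketch inferred from the usages in this patch, not the exact upstream item.
    #[derive(Debug, Clone, Copy)]
    enum GoalEvaluationKind {
        Root,
        Nested { is_normalizes_to_hack: IsNormalizesToHack },
    }

`Root` is used only when evaluating a root goal via `evaluate_root_goal`, while every nested goal records whether it originates from the normalizes-to hack.
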
@@ -537,7 +545,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
/// Iterate over all added goals: returning `Ok(Some(_))` in case we can stop rerunning.
///
- /// Goals for the next step get directly added the the nested goals of the `EvalCtxt`.
+ /// Goals for the next step get directly added to the nested goals of the `EvalCtxt`.
fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> {
let tcx = self.tcx();
let mut goals = core::mem::replace(&mut self.nested_goals, NestedGoals::new());
@@ -557,9 +565,11 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
},
);
- let (_, certainty, instantiate_goals) =
- self.evaluate_goal(IsNormalizesToHack::Yes, unconstrained_goal)?;
- self.add_goals(instantiate_goals);
+ let (_, certainty, instantiate_goals) = self.evaluate_goal(
+ GoalEvaluationKind::Nested { is_normalizes_to_hack: IsNormalizesToHack::Yes },
+ unconstrained_goal,
+ )?;
+ self.nested_goals.goals.extend(instantiate_goals);
// Finally, equate the goal's RHS with the unconstrained var.
// We put the nested goals from this into goals instead of
@@ -592,9 +602,11 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
}
for goal in goals.goals.drain(..) {
- let (has_changed, certainty, instantiate_goals) =
- self.evaluate_goal(IsNormalizesToHack::No, goal)?;
- self.add_goals(instantiate_goals);
+ let (has_changed, certainty, instantiate_goals) = self.evaluate_goal(
+ GoalEvaluationKind::Nested { is_normalizes_to_hack: IsNormalizesToHack::No },
+ goal,
+ )?;
+ self.nested_goals.goals.extend(instantiate_goals);
if has_changed {
unchanged_certainty = None;
}
@@ -602,7 +614,7 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
match certainty {
Certainty::Yes => {}
Certainty::Maybe(_) => {
- self.add_goal(goal);
+ self.nested_goals.goals.push(goal);
unchanged_certainty = unchanged_certainty.map(|c| c.unify_with(certainty));
}
}
@@ -916,7 +928,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
if candidate_key.def_id != key.def_id {
continue;
}
- values.extend(self.probe_candidate("opaque type storage").enter(|ecx| {
+ values.extend(self.probe_misc_candidate("opaque type storage").enter(|ecx| {
for (a, b) in std::iter::zip(candidate_key.args, key.args) {
ecx.eq(param_env, a, b)?;
}
@@ -945,8 +957,10 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
use rustc_middle::mir::interpret::ErrorHandled;
match self.infcx.try_const_eval_resolve(param_env, unevaluated, ty, None) {
Ok(ct) => Some(ct),
- Err(ErrorHandled::Reported(e)) => Some(ty::Const::new_error(self.tcx(), e.into(), ty)),
- Err(ErrorHandled::TooGeneric) => None,
+ Err(ErrorHandled::Reported(e, _)) => {
+ Some(ty::Const::new_error(self.tcx(), e.into(), ty))
+ }
+ Err(ErrorHandled::TooGeneric(_)) => None,
}
}
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
index 523841951..b3f9218d7 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/canonical.rs
@@ -10,17 +10,21 @@
//! [c]: https://rustc-dev-guide.rust-lang.org/solve/canonicalization.html
use super::{CanonicalInput, Certainty, EvalCtxt, Goal};
use crate::solve::canonicalize::{CanonicalizeMode, Canonicalizer};
-use crate::solve::{response_no_constraints_raw, CanonicalResponse, QueryResult, Response};
+use crate::solve::{
+ inspect, response_no_constraints_raw, CanonicalResponse, QueryResult, Response,
+};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::IndexVec;
use rustc_infer::infer::canonical::query_response::make_query_region_constraints;
use rustc_infer::infer::canonical::CanonicalVarValues;
use rustc_infer::infer::canonical::{CanonicalExt, QueryRegionConstraints};
-use rustc_infer::infer::InferCtxt;
+use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt, InferOk};
+use rustc_middle::infer::canonical::Canonical;
use rustc_middle::traits::query::NoSolution;
use rustc_middle::traits::solve::{
ExternalConstraintsData, MaybeCause, PredefinedOpaquesData, QueryInput,
};
+use rustc_middle::traits::ObligationCause;
use rustc_middle::ty::{
self, BoundVar, GenericArgKind, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitableExt,
@@ -29,6 +33,22 @@ use rustc_span::DUMMY_SP;
use std::iter;
use std::ops::Deref;
+trait ResponseT<'tcx> {
+ fn var_values(&self) -> CanonicalVarValues<'tcx>;
+}
+
+impl<'tcx> ResponseT<'tcx> for Response<'tcx> {
+ fn var_values(&self) -> CanonicalVarValues<'tcx> {
+ self.var_values
+ }
+}
+
+impl<'tcx, T> ResponseT<'tcx> for inspect::State<'tcx, T> {
+ fn var_values(&self) -> CanonicalVarValues<'tcx> {
+ self.var_values
+ }
+}
+
impl<'tcx> EvalCtxt<'_, 'tcx> {
/// Canonicalizes the goal remembering the original values
/// for each bound variable.
@@ -188,12 +208,14 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
original_values: Vec<ty::GenericArg<'tcx>>,
response: CanonicalResponse<'tcx>,
) -> Result<(Certainty, Vec<Goal<'tcx, ty::Predicate<'tcx>>>), NoSolution> {
- let substitution = self.compute_query_response_substitution(&original_values, &response);
+ let substitution =
+ Self::compute_query_response_substitution(self.infcx, &original_values, &response);
let Response { var_values, external_constraints, certainty } =
response.substitute(self.tcx(), &substitution);
- let nested_goals = self.unify_query_var_values(param_env, &original_values, var_values)?;
+ let nested_goals =
+ Self::unify_query_var_values(self.infcx, param_env, &original_values, var_values)?;
let ExternalConstraintsData { region_constraints, opaque_types } =
external_constraints.deref();
@@ -206,21 +228,21 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
/// This returns the substitutions to instantiate the bound variables of
/// the canonical response. This depends on the `original_values` for the
/// bound variables.
- fn compute_query_response_substitution(
- &self,
+ fn compute_query_response_substitution<T: ResponseT<'tcx>>(
+ infcx: &InferCtxt<'tcx>,
original_values: &[ty::GenericArg<'tcx>],
- response: &CanonicalResponse<'tcx>,
+ response: &Canonical<'tcx, T>,
) -> CanonicalVarValues<'tcx> {
// FIXME: Longterm canonical queries should deal with all placeholders
// created inside of the query directly instead of returning them to the
// caller.
- let prev_universe = self.infcx.universe();
+ let prev_universe = infcx.universe();
let universes_created_in_query = response.max_universe.index();
for _ in 0..universes_created_in_query {
- self.infcx.create_next_universe();
+ infcx.create_next_universe();
}
- let var_values = response.value.var_values;
+ let var_values = response.value.var_values();
assert_eq!(original_values.len(), var_values.len());
// If the query did not make progress with constraining inference variables,
@@ -254,13 +276,13 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
}
- let var_values = self.tcx().mk_args_from_iter(response.variables.iter().enumerate().map(
+ let var_values = infcx.tcx.mk_args_from_iter(response.variables.iter().enumerate().map(
|(index, info)| {
if info.universe() != ty::UniverseIndex::ROOT {
// A variable from inside a binder of the query. While ideally these shouldn't
// exist at all (see the FIXME at the start of this method), we have to deal with
// them for now.
- self.infcx.instantiate_canonical_var(DUMMY_SP, info, |idx| {
+ infcx.instantiate_canonical_var(DUMMY_SP, info, |idx| {
ty::UniverseIndex::from(prev_universe.index() + idx.index())
})
} else if info.is_existential() {
@@ -274,7 +296,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
if let Some(v) = opt_values[BoundVar::from_usize(index)] {
v
} else {
- self.infcx.instantiate_canonical_var(DUMMY_SP, info, |_| prev_universe)
+ infcx.instantiate_canonical_var(DUMMY_SP, info, |_| prev_universe)
}
} else {
// For placeholders which were already part of the input, we simply map this
@@ -287,9 +309,9 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
CanonicalVarValues { var_values }
}
- #[instrument(level = "debug", skip(self, param_env), ret)]
+ #[instrument(level = "debug", skip(infcx, param_env), ret)]
fn unify_query_var_values(
- &self,
+ infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
original_values: &[ty::GenericArg<'tcx>],
var_values: CanonicalVarValues<'tcx>,
@@ -298,7 +320,18 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
let mut nested_goals = vec![];
for (&orig, response) in iter::zip(original_values, var_values.var_values) {
- nested_goals.extend(self.eq_and_get_goals(param_env, orig, response)?);
+ nested_goals.extend(
+ infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .eq(DefineOpaqueTypes::No, orig, response)
+ .map(|InferOk { value: (), obligations }| {
+ obligations.into_iter().map(|o| Goal::from(o))
+ })
+ .map_err(|e| {
+ debug!(?e, "failed to equate");
+ NoSolution
+ })?,
+ );
}
Ok(nested_goals)
@@ -382,6 +415,17 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for EagerResolver<'_, 'tcx> {
}
}
}
+ ty::ConstKind::Infer(ty::InferConst::EffectVar(vid)) => {
+ debug_assert_eq!(c.ty(), self.infcx.tcx.types.bool);
+ match self.infcx.probe_effect_var(vid) {
+ Some(c) => c.as_const(self.infcx.tcx),
+ None => ty::Const::new_infer(
+ self.infcx.tcx,
+ ty::InferConst::EffectVar(self.infcx.root_effect_var(vid)),
+ self.infcx.tcx.types.bool,
+ ),
+ }
+ }
_ => {
if c.has_infer() {
c.super_fold_with(self)
@@ -392,3 +436,35 @@ impl<'tcx> TypeFolder<TyCtxt<'tcx>> for EagerResolver<'_, 'tcx> {
}
}
}
+
+impl<'tcx> inspect::ProofTreeBuilder<'tcx> {
+ pub fn make_canonical_state<T: TypeFoldable<TyCtxt<'tcx>>>(
+ ecx: &EvalCtxt<'_, 'tcx>,
+ data: T,
+ ) -> inspect::CanonicalState<'tcx, T> {
+ let state = inspect::State { var_values: ecx.var_values, data };
+ let state = state.fold_with(&mut EagerResolver { infcx: ecx.infcx });
+ Canonicalizer::canonicalize(
+ ecx.infcx,
+ CanonicalizeMode::Response { max_input_universe: ecx.max_input_universe },
+ &mut vec![],
+ state,
+ )
+ }
+
+ pub fn instantiate_canonical_state<T: TypeFoldable<TyCtxt<'tcx>>>(
+ infcx: &InferCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &[ty::GenericArg<'tcx>],
+ state: inspect::CanonicalState<'tcx, T>,
+ ) -> Result<(Vec<Goal<'tcx, ty::Predicate<'tcx>>>, T), NoSolution> {
+ let substitution =
+ EvalCtxt::compute_query_response_substitution(infcx, original_values, &state);
+
+ let inspect::State { var_values, data } = state.substitute(infcx.tcx, &substitution);
+
+ let nested_goals =
+ EvalCtxt::unify_query_var_values(infcx, param_env, original_values, var_values)?;
+ Ok((nested_goals, data))
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
index 317c43baf..6087b9167 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/probe.rs
@@ -1,5 +1,5 @@
use super::EvalCtxt;
-use rustc_middle::traits::solve::{inspect, QueryResult};
+use rustc_middle::traits::solve::{inspect, CandidateSource, QueryResult};
use std::marker::PhantomData;
pub(in crate::solve) struct ProbeCtxt<'me, 'a, 'tcx, F, T> {
@@ -10,7 +10,7 @@ pub(in crate::solve) struct ProbeCtxt<'me, 'a, 'tcx, F, T> {
impl<'tcx, F, T> ProbeCtxt<'_, '_, 'tcx, F, T>
where
- F: FnOnce(&T) -> inspect::CandidateKind<'tcx>,
+ F: FnOnce(&T) -> inspect::ProbeKind<'tcx>,
{
pub(in crate::solve) fn enter(self, f: impl FnOnce(&mut EvalCtxt<'_, 'tcx>) -> T) -> T {
let ProbeCtxt { ecx: outer_ecx, probe_kind, _result } = self;
@@ -24,13 +24,13 @@ where
search_graph: outer_ecx.search_graph,
nested_goals: outer_ecx.nested_goals.clone(),
tainted: outer_ecx.tainted,
- inspect: outer_ecx.inspect.new_goal_candidate(),
+ inspect: outer_ecx.inspect.new_probe(),
};
let r = nested_ecx.infcx.probe(|_| f(&mut nested_ecx));
if !outer_ecx.inspect.is_noop() {
- let cand_kind = probe_kind(&r);
- nested_ecx.inspect.candidate_kind(cand_kind);
- outer_ecx.inspect.goal_candidate(nested_ecx.inspect);
+ let probe_kind = probe_kind(&r);
+ nested_ecx.inspect.probe_kind(probe_kind);
+ outer_ecx.inspect.finish_probe(nested_ecx.inspect);
}
r
}
@@ -41,25 +41,45 @@ impl<'a, 'tcx> EvalCtxt<'a, 'tcx> {
/// as expensive as necessary to output the desired information.
pub(in crate::solve) fn probe<F, T>(&mut self, probe_kind: F) -> ProbeCtxt<'_, 'a, 'tcx, F, T>
where
- F: FnOnce(&T) -> inspect::CandidateKind<'tcx>,
+ F: FnOnce(&T) -> inspect::ProbeKind<'tcx>,
{
ProbeCtxt { ecx: self, probe_kind, _result: PhantomData }
}
- pub(in crate::solve) fn probe_candidate(
+ pub(in crate::solve) fn probe_misc_candidate(
&mut self,
name: &'static str,
) -> ProbeCtxt<
'_,
'a,
'tcx,
- impl FnOnce(&QueryResult<'tcx>) -> inspect::CandidateKind<'tcx>,
+ impl FnOnce(&QueryResult<'tcx>) -> inspect::ProbeKind<'tcx>,
QueryResult<'tcx>,
> {
ProbeCtxt {
ecx: self,
- probe_kind: move |result: &QueryResult<'tcx>| inspect::CandidateKind::Candidate {
- name: name.to_string(),
+ probe_kind: move |result: &QueryResult<'tcx>| inspect::ProbeKind::MiscCandidate {
+ name,
+ result: *result,
+ },
+ _result: PhantomData,
+ }
+ }
+
+ pub(in crate::solve) fn probe_trait_candidate(
+ &mut self,
+ source: CandidateSource,
+ ) -> ProbeCtxt<
+ '_,
+ 'a,
+ 'tcx,
+ impl FnOnce(&QueryResult<'tcx>) -> inspect::ProbeKind<'tcx>,
+ QueryResult<'tcx>,
+ > {
+ ProbeCtxt {
+ ecx: self,
+ probe_kind: move |result: &QueryResult<'tcx>| inspect::ProbeKind::TraitCandidate {
+ source,
result: *result,
},
_result: PhantomData,
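
`probe_misc_candidate` and the new `probe_trait_candidate` are driven with the same `enter` pattern used elsewhere in this patch (for example the `NormalizedSelfTyAssembly` probe above). A hedged sketch of a caller inside the solver module; `impl_def_id` and the closure body are placeholders, and the response helper is assumed to be the existing `evaluate_added_goals_and_make_canonical_response`:

    use rustc_hir::def_id::DefId;
    use rustc_middle::traits::solve::{CandidateSource, Certainty, QueryResult};

    use crate::solve::EvalCtxt;

    fn consider_impl_candidate_sketch<'tcx>(
        ecx: &mut EvalCtxt<'_, 'tcx>,
        impl_def_id: DefId,
    ) -> QueryResult<'tcx> {
        ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
            // ... instantiate the impl, equate the trait refs, push nested goals ...
            ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
        })
    }
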
diff --git a/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs b/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
index 42d7a587c..315df06be 100644
--- a/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
+++ b/compiler/rustc_trait_selection/src/solve/eval_ctxt/select.rs
@@ -4,14 +4,14 @@ use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt};
use rustc_infer::traits::{
Obligation, PolyTraitObligation, PredicateObligation, Selection, SelectionResult, TraitEngine,
};
-use rustc_middle::traits::solve::{CanonicalInput, Certainty, Goal};
+use rustc_middle::traits::solve::{CandidateSource, CanonicalInput, Certainty, Goal};
use rustc_middle::traits::{
BuiltinImplSource, ImplSource, ImplSourceUserDefinedData, ObligationCause, SelectionError,
};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::DUMMY_SP;
-use crate::solve::assembly::{Candidate, CandidateSource};
+use crate::solve::assembly::Candidate;
use crate::solve::eval_ctxt::{EvalCtxt, GenerateProofTree};
use crate::solve::inspect::ProofTreeBuilder;
use crate::traits::StructurallyNormalizeExt;
diff --git a/compiler/rustc_trait_selection/src/solve/inspect.rs b/compiler/rustc_trait_selection/src/solve/inspect.rs
deleted file mode 100644
index cda683963..000000000
--- a/compiler/rustc_trait_selection/src/solve/inspect.rs
+++ /dev/null
@@ -1,428 +0,0 @@
-use rustc_middle::traits::query::NoSolution;
-use rustc_middle::traits::solve::inspect::{self, CacheHit, CandidateKind};
-use rustc_middle::traits::solve::{
- CanonicalInput, Certainty, Goal, IsNormalizesToHack, QueryInput, QueryResult,
-};
-use rustc_middle::ty::{self, TyCtxt};
-use rustc_session::config::DumpSolverProofTree;
-
-use super::eval_ctxt::UseGlobalCache;
-use super::GenerateProofTree;
-
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
-pub struct WipGoalEvaluation<'tcx> {
- pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
- pub canonicalized_goal: Option<CanonicalInput<'tcx>>,
-
- pub evaluation_steps: Vec<WipGoalEvaluationStep<'tcx>>,
-
- pub cache_hit: Option<CacheHit>,
- pub is_normalizes_to_hack: IsNormalizesToHack,
- pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
-
- pub result: Option<QueryResult<'tcx>>,
-}
-
-impl<'tcx> WipGoalEvaluation<'tcx> {
- pub fn finalize(self) -> inspect::GoalEvaluation<'tcx> {
- inspect::GoalEvaluation {
- uncanonicalized_goal: self.uncanonicalized_goal,
- canonicalized_goal: self.canonicalized_goal.unwrap(),
- kind: match self.cache_hit {
- Some(hit) => inspect::GoalEvaluationKind::CacheHit(hit),
- None => inspect::GoalEvaluationKind::Uncached {
- revisions: self
- .evaluation_steps
- .into_iter()
- .map(WipGoalEvaluationStep::finalize)
- .collect(),
- },
- },
- is_normalizes_to_hack: self.is_normalizes_to_hack,
- returned_goals: self.returned_goals,
- result: self.result.unwrap(),
- }
- }
-}
-
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
-pub struct WipAddedGoalsEvaluation<'tcx> {
- pub evaluations: Vec<Vec<WipGoalEvaluation<'tcx>>>,
- pub result: Option<Result<Certainty, NoSolution>>,
-}
-
-impl<'tcx> WipAddedGoalsEvaluation<'tcx> {
- pub fn finalize(self) -> inspect::AddedGoalsEvaluation<'tcx> {
- inspect::AddedGoalsEvaluation {
- evaluations: self
- .evaluations
- .into_iter()
- .map(|evaluations| {
- evaluations.into_iter().map(WipGoalEvaluation::finalize).collect()
- })
- .collect(),
- result: self.result.unwrap(),
- }
- }
-}
-
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
-pub struct WipGoalEvaluationStep<'tcx> {
- pub instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
-
- pub nested_goal_evaluations: Vec<WipAddedGoalsEvaluation<'tcx>>,
- pub candidates: Vec<WipGoalCandidate<'tcx>>,
-
- pub result: Option<QueryResult<'tcx>>,
-}
-
-impl<'tcx> WipGoalEvaluationStep<'tcx> {
- pub fn finalize(self) -> inspect::GoalEvaluationStep<'tcx> {
- inspect::GoalEvaluationStep {
- instantiated_goal: self.instantiated_goal,
- nested_goal_evaluations: self
- .nested_goal_evaluations
- .into_iter()
- .map(WipAddedGoalsEvaluation::finalize)
- .collect(),
- candidates: self.candidates.into_iter().map(WipGoalCandidate::finalize).collect(),
- result: self.result.unwrap(),
- }
- }
-}
-
-#[derive(Eq, PartialEq, Debug, Hash, HashStable)]
-pub struct WipGoalCandidate<'tcx> {
- pub nested_goal_evaluations: Vec<WipAddedGoalsEvaluation<'tcx>>,
- pub candidates: Vec<WipGoalCandidate<'tcx>>,
- pub kind: Option<CandidateKind<'tcx>>,
-}
-
-impl<'tcx> WipGoalCandidate<'tcx> {
- pub fn finalize(self) -> inspect::GoalCandidate<'tcx> {
- inspect::GoalCandidate {
- nested_goal_evaluations: self
- .nested_goal_evaluations
- .into_iter()
- .map(WipAddedGoalsEvaluation::finalize)
- .collect(),
- candidates: self.candidates.into_iter().map(WipGoalCandidate::finalize).collect(),
- kind: self.kind.unwrap(),
- }
- }
-}
-
-#[derive(Debug)]
-pub enum DebugSolver<'tcx> {
- Root,
- GoalEvaluation(WipGoalEvaluation<'tcx>),
- AddedGoalsEvaluation(WipAddedGoalsEvaluation<'tcx>),
- GoalEvaluationStep(WipGoalEvaluationStep<'tcx>),
- GoalCandidate(WipGoalCandidate<'tcx>),
-}
-
-impl<'tcx> From<WipGoalEvaluation<'tcx>> for DebugSolver<'tcx> {
- fn from(g: WipGoalEvaluation<'tcx>) -> DebugSolver<'tcx> {
- DebugSolver::GoalEvaluation(g)
- }
-}
-
-impl<'tcx> From<WipAddedGoalsEvaluation<'tcx>> for DebugSolver<'tcx> {
- fn from(g: WipAddedGoalsEvaluation<'tcx>) -> DebugSolver<'tcx> {
- DebugSolver::AddedGoalsEvaluation(g)
- }
-}
-
-impl<'tcx> From<WipGoalEvaluationStep<'tcx>> for DebugSolver<'tcx> {
- fn from(g: WipGoalEvaluationStep<'tcx>) -> DebugSolver<'tcx> {
- DebugSolver::GoalEvaluationStep(g)
- }
-}
-
-impl<'tcx> From<WipGoalCandidate<'tcx>> for DebugSolver<'tcx> {
- fn from(g: WipGoalCandidate<'tcx>) -> DebugSolver<'tcx> {
- DebugSolver::GoalCandidate(g)
- }
-}
-
-pub struct ProofTreeBuilder<'tcx> {
- state: Option<Box<BuilderData<'tcx>>>,
-}
-
-struct BuilderData<'tcx> {
- tree: DebugSolver<'tcx>,
- use_global_cache: UseGlobalCache,
-}
-
-impl<'tcx> ProofTreeBuilder<'tcx> {
- fn new(
- state: impl Into<DebugSolver<'tcx>>,
- use_global_cache: UseGlobalCache,
- ) -> ProofTreeBuilder<'tcx> {
- ProofTreeBuilder {
- state: Some(Box::new(BuilderData { tree: state.into(), use_global_cache })),
- }
- }
-
- fn nested(&self, state: impl Into<DebugSolver<'tcx>>) -> Self {
- match &self.state {
- Some(prev_state) => Self {
- state: Some(Box::new(BuilderData {
- tree: state.into(),
- use_global_cache: prev_state.use_global_cache,
- })),
- },
- None => Self { state: None },
- }
- }
-
- fn as_mut(&mut self) -> Option<&mut DebugSolver<'tcx>> {
- self.state.as_mut().map(|boxed| &mut boxed.tree)
- }
-
- pub fn finalize(self) -> Option<inspect::GoalEvaluation<'tcx>> {
- match self.state?.tree {
- DebugSolver::GoalEvaluation(wip_goal_evaluation) => {
- Some(wip_goal_evaluation.finalize())
- }
- root => unreachable!("unexpected proof tree builder root node: {:?}", root),
- }
- }
-
- pub fn use_global_cache(&self) -> bool {
- self.state
- .as_ref()
- .map(|state| matches!(state.use_global_cache, UseGlobalCache::Yes))
- .unwrap_or(true)
- }
-
- pub fn new_maybe_root(
- tcx: TyCtxt<'tcx>,
- generate_proof_tree: GenerateProofTree,
- ) -> ProofTreeBuilder<'tcx> {
- match generate_proof_tree {
- GenerateProofTree::Never => ProofTreeBuilder::new_noop(),
- GenerateProofTree::IfEnabled => {
- let opts = &tcx.sess.opts.unstable_opts;
- match opts.dump_solver_proof_tree {
- DumpSolverProofTree::Always => {
- let use_cache = opts.dump_solver_proof_tree_use_cache.unwrap_or(true);
- ProofTreeBuilder::new_root(UseGlobalCache::from_bool(use_cache))
- }
- // `OnError` is handled by reevaluating goals in error
- // reporting with `GenerateProofTree::Yes`.
- DumpSolverProofTree::OnError | DumpSolverProofTree::Never => {
- ProofTreeBuilder::new_noop()
- }
- }
- }
- GenerateProofTree::Yes(use_cache) => ProofTreeBuilder::new_root(use_cache),
- }
- }
-
- pub fn new_root(use_global_cache: UseGlobalCache) -> ProofTreeBuilder<'tcx> {
- ProofTreeBuilder::new(DebugSolver::Root, use_global_cache)
- }
-
- pub fn new_noop() -> ProofTreeBuilder<'tcx> {
- ProofTreeBuilder { state: None }
- }
-
- pub fn is_noop(&self) -> bool {
- self.state.is_none()
- }
-
- pub fn new_goal_evaluation(
- &mut self,
- goal: Goal<'tcx, ty::Predicate<'tcx>>,
- is_normalizes_to_hack: IsNormalizesToHack,
- ) -> ProofTreeBuilder<'tcx> {
- if self.state.is_none() {
- return ProofTreeBuilder { state: None };
- }
-
- self.nested(WipGoalEvaluation {
- uncanonicalized_goal: goal,
- canonicalized_goal: None,
- evaluation_steps: vec![],
- is_normalizes_to_hack,
- cache_hit: None,
- returned_goals: vec![],
- result: None,
- })
- }
-
- pub fn canonicalized_goal(&mut self, canonical_goal: CanonicalInput<'tcx>) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::GoalEvaluation(goal_evaluation) => {
- assert_eq!(goal_evaluation.canonicalized_goal.replace(canonical_goal), None);
- }
- _ => unreachable!(),
- }
- }
- }
-
- pub fn cache_hit(&mut self, cache_hit: CacheHit) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::GoalEvaluation(goal_evaluation) => {
- assert_eq!(goal_evaluation.cache_hit.replace(cache_hit), None);
- }
- _ => unreachable!(),
- };
- }
- }
-
- pub fn returned_goals(&mut self, goals: &[Goal<'tcx, ty::Predicate<'tcx>>]) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::GoalEvaluation(evaluation) => {
- assert!(evaluation.returned_goals.is_empty());
- evaluation.returned_goals.extend(goals);
- }
- _ => unreachable!(),
- }
- }
- }
- pub fn goal_evaluation(&mut self, goal_evaluation: ProofTreeBuilder<'tcx>) {
- if let Some(this) = self.as_mut() {
- match (this, goal_evaluation.state.unwrap().tree) {
- (
- DebugSolver::AddedGoalsEvaluation(WipAddedGoalsEvaluation {
- evaluations, ..
- }),
- DebugSolver::GoalEvaluation(goal_evaluation),
- ) => evaluations.last_mut().unwrap().push(goal_evaluation),
- (this @ DebugSolver::Root, goal_evaluation) => *this = goal_evaluation,
- _ => unreachable!(),
- }
- }
- }
-
- pub fn new_goal_evaluation_step(
- &mut self,
- instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
- ) -> ProofTreeBuilder<'tcx> {
- if self.state.is_none() {
- return ProofTreeBuilder { state: None };
- }
-
- self.nested(WipGoalEvaluationStep {
- instantiated_goal,
- nested_goal_evaluations: vec![],
- candidates: vec![],
- result: None,
- })
- }
- pub fn goal_evaluation_step(&mut self, goal_eval_step: ProofTreeBuilder<'tcx>) {
- if let Some(this) = self.as_mut() {
- match (this, goal_eval_step.state.unwrap().tree) {
- (DebugSolver::GoalEvaluation(goal_eval), DebugSolver::GoalEvaluationStep(step)) => {
- goal_eval.evaluation_steps.push(step);
- }
- _ => unreachable!(),
- }
- }
- }
-
- pub fn new_goal_candidate(&mut self) -> ProofTreeBuilder<'tcx> {
- if self.state.is_none() {
- return ProofTreeBuilder { state: None };
- }
-
- self.nested(WipGoalCandidate {
- nested_goal_evaluations: vec![],
- candidates: vec![],
- kind: None,
- })
- }
-
- pub fn candidate_kind(&mut self, candidate_kind: CandidateKind<'tcx>) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::GoalCandidate(this) => {
- assert_eq!(this.kind.replace(candidate_kind), None)
- }
- _ => unreachable!(),
- }
- }
- }
-
- pub fn goal_candidate(&mut self, candidate: ProofTreeBuilder<'tcx>) {
- if let Some(this) = self.as_mut() {
- match (this, candidate.state.unwrap().tree) {
- (
- DebugSolver::GoalCandidate(WipGoalCandidate { candidates, .. })
- | DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep { candidates, .. }),
- DebugSolver::GoalCandidate(candidate),
- ) => candidates.push(candidate),
- _ => unreachable!(),
- }
- }
- }
-
- pub fn new_evaluate_added_goals(&mut self) -> ProofTreeBuilder<'tcx> {
- if self.state.is_none() {
- return ProofTreeBuilder { state: None };
- }
-
- self.nested(WipAddedGoalsEvaluation { evaluations: vec![], result: None })
- }
-
- pub fn evaluate_added_goals_loop_start(&mut self) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::AddedGoalsEvaluation(this) => {
- this.evaluations.push(vec![]);
- }
- _ => unreachable!(),
- }
- }
- }
-
- pub fn eval_added_goals_result(&mut self, result: Result<Certainty, NoSolution>) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::AddedGoalsEvaluation(this) => {
- assert_eq!(this.result.replace(result), None);
- }
- _ => unreachable!(),
- }
- }
- }
-
- pub fn added_goals_evaluation(&mut self, goals_evaluation: ProofTreeBuilder<'tcx>) {
- if let Some(this) = self.as_mut() {
- match (this, goals_evaluation.state.unwrap().tree) {
- (
- DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
- nested_goal_evaluations,
- ..
- })
- | DebugSolver::GoalCandidate(WipGoalCandidate {
- nested_goal_evaluations, ..
- }),
- DebugSolver::AddedGoalsEvaluation(added_goals_evaluation),
- ) => nested_goal_evaluations.push(added_goals_evaluation),
- _ => unreachable!(),
- }
- }
- }
-
- pub fn query_result(&mut self, result: QueryResult<'tcx>) {
- if let Some(this) = self.as_mut() {
- match this {
- DebugSolver::GoalEvaluation(goal_evaluation) => {
- assert_eq!(goal_evaluation.result.replace(result), None);
- }
- DebugSolver::GoalEvaluationStep(evaluation_step) => {
- assert_eq!(evaluation_step.result.replace(result), None);
- }
- DebugSolver::Root
- | DebugSolver::AddedGoalsEvaluation(_)
- | DebugSolver::GoalCandidate(_) => unreachable!(),
- }
- }
- }
-}
diff --git a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs
new file mode 100644
index 000000000..15c8d9e5b
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs
@@ -0,0 +1,235 @@
+/// An infrastructure to mechanically analyse proof trees.
+///
+/// It is unavoidable that this representation is somewhat
+/// lossy as it should hide quite a few semantically relevant things,
+/// e.g. canonicalization and the order of nested goals.
+///
+/// @lcnr: However, a lot of the weirdness here is not strictly necessary
+/// and could be improved in the future. This is mostly good enough for
+/// coherence right now and was annoying to implement, so I am leaving it
+/// as is until we start using it for something else.
+use std::ops::ControlFlow;
+
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::traits::solve::{inspect, QueryResult};
+use rustc_middle::traits::solve::{Certainty, Goal};
+use rustc_middle::ty;
+
+use crate::solve::inspect::ProofTreeBuilder;
+use crate::solve::{GenerateProofTree, InferCtxtEvalExt, UseGlobalCache};
+
+pub struct InspectGoal<'a, 'tcx> {
+ infcx: &'a InferCtxt<'tcx>,
+ depth: usize,
+ orig_values: &'a [ty::GenericArg<'tcx>],
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ evaluation: &'a inspect::GoalEvaluation<'tcx>,
+}
+
+pub struct InspectCandidate<'a, 'tcx> {
+ goal: &'a InspectGoal<'a, 'tcx>,
+ kind: inspect::ProbeKind<'tcx>,
+ nested_goals: Vec<inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>>,
+ result: QueryResult<'tcx>,
+}
+
+impl<'a, 'tcx> InspectCandidate<'a, 'tcx> {
+ pub fn infcx(&self) -> &'a InferCtxt<'tcx> {
+ self.goal.infcx
+ }
+
+ pub fn kind(&self) -> inspect::ProbeKind<'tcx> {
+ self.kind
+ }
+
+ pub fn result(&self) -> Result<Certainty, NoSolution> {
+ self.result.map(|c| c.value.certainty)
+ }
+
+ /// Visit the nested goals of this candidate.
+ ///
+ /// FIXME(@lcnr): we have to slightly adapt this API
+ /// to also use it to compute the most relevant goal
+ /// for fulfillment errors. Will do that once we actually
+ /// need it.
+ pub fn visit_nested<V: ProofTreeVisitor<'tcx>>(
+ &self,
+ visitor: &mut V,
+ ) -> ControlFlow<V::BreakTy> {
+ // HACK: An arbitrary cutoff to avoid dealing with overflow and cycles.
+        if self.goal.depth <= 10 {
+ let infcx = self.goal.infcx;
+ infcx.probe(|_| {
+ let mut instantiated_goals = vec![];
+ for goal in &self.nested_goals {
+ let goal = match ProofTreeBuilder::instantiate_canonical_state(
+ infcx,
+ self.goal.goal.param_env,
+ self.goal.orig_values,
+ *goal,
+ ) {
+ Ok((_goals, goal)) => goal,
+ Err(NoSolution) => {
+ warn!(
+ "unexpected failure when instantiating {:?}: {:?}",
+ goal, self.nested_goals
+ );
+ return ControlFlow::Continue(());
+ }
+ };
+ instantiated_goals.push(goal);
+ }
+
+ for &goal in &instantiated_goals {
+ let (_, proof_tree) =
+ infcx.evaluate_root_goal(goal, GenerateProofTree::Yes(UseGlobalCache::No));
+ let proof_tree = proof_tree.unwrap();
+ visitor.visit_goal(&InspectGoal::new(
+ infcx,
+ self.goal.depth + 1,
+ &proof_tree,
+ ))?;
+ }
+
+ ControlFlow::Continue(())
+ })?;
+ }
+ ControlFlow::Continue(())
+ }
+}
+
+impl<'a, 'tcx> InspectGoal<'a, 'tcx> {
+ pub fn infcx(&self) -> &'a InferCtxt<'tcx> {
+ self.infcx
+ }
+
+ pub fn goal(&self) -> Goal<'tcx, ty::Predicate<'tcx>> {
+ self.goal
+ }
+
+ pub fn result(&self) -> Result<Certainty, NoSolution> {
+ self.evaluation.evaluation.result.map(|c| c.value.certainty)
+ }
+
+ fn candidates_recur(
+ &'a self,
+ candidates: &mut Vec<InspectCandidate<'a, 'tcx>>,
+ nested_goals: &mut Vec<inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>>,
+ probe: &inspect::Probe<'tcx>,
+ ) {
+ for step in &probe.steps {
+ match step {
+ &inspect::ProbeStep::AddGoal(goal) => nested_goals.push(goal),
+ inspect::ProbeStep::EvaluateGoals(_) => (),
+ inspect::ProbeStep::NestedProbe(ref probe) => {
+ // Nested probes have to prove goals added in their parent
+ // but do not leak them, so we truncate the added goals
+ // afterwards.
+ let num_goals = nested_goals.len();
+ self.candidates_recur(candidates, nested_goals, probe);
+ nested_goals.truncate(num_goals);
+ }
+ }
+ }
+
+ match probe.kind {
+ inspect::ProbeKind::NormalizedSelfTyAssembly
+ | inspect::ProbeKind::UnsizeAssembly
+ | inspect::ProbeKind::UpcastProjectionCompatibility => (),
+ // We add a candidate for the root evaluation if there
+ // is only one way to prove a given goal, e.g. for `WellFormed`.
+ //
+ // FIXME: This is currently wrong if we don't even try any
+ // candidates, e.g. for a trait goal, as in this case `candidates` is
+ // actually supposed to be empty.
+ inspect::ProbeKind::Root { result } => {
+ if candidates.is_empty() {
+ candidates.push(InspectCandidate {
+ goal: self,
+ kind: probe.kind,
+ nested_goals: nested_goals.clone(),
+ result,
+ });
+ }
+ }
+ inspect::ProbeKind::MiscCandidate { name: _, result }
+ | inspect::ProbeKind::TraitCandidate { source: _, result } => {
+ candidates.push(InspectCandidate {
+ goal: self,
+ kind: probe.kind,
+ nested_goals: nested_goals.clone(),
+ result,
+ });
+ }
+ }
+ }
+
+ pub fn candidates(&'a self) -> Vec<InspectCandidate<'a, 'tcx>> {
+ let mut candidates = vec![];
+ let last_eval_step = match self.evaluation.evaluation.kind {
+ inspect::CanonicalGoalEvaluationKind::Overflow
+ | inspect::CanonicalGoalEvaluationKind::CacheHit(_) => {
+ warn!("unexpected root evaluation: {:?}", self.evaluation);
+ return vec![];
+ }
+ inspect::CanonicalGoalEvaluationKind::Uncached { ref revisions } => {
+ if let Some(last) = revisions.last() {
+ last
+ } else {
+ return vec![];
+ }
+ }
+ };
+
+ let mut nested_goals = vec![];
+ self.candidates_recur(&mut candidates, &mut nested_goals, &last_eval_step.evaluation);
+
+ candidates
+ }
+
+ fn new(
+ infcx: &'a InferCtxt<'tcx>,
+ depth: usize,
+ root: &'a inspect::GoalEvaluation<'tcx>,
+ ) -> Self {
+ match root.kind {
+ inspect::GoalEvaluationKind::Root { ref orig_values } => InspectGoal {
+ infcx,
+ depth,
+ orig_values,
+ goal: infcx.resolve_vars_if_possible(root.uncanonicalized_goal),
+ evaluation: root,
+ },
+ inspect::GoalEvaluationKind::Nested { .. } => unreachable!(),
+ }
+ }
+}
+
+/// The public API to interact with proof trees.
+pub trait ProofTreeVisitor<'tcx> {
+ type BreakTy;
+
+ fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy>;
+}
+
+pub trait ProofTreeInferCtxtExt<'tcx> {
+ fn visit_proof_tree<V: ProofTreeVisitor<'tcx>>(
+ &self,
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ visitor: &mut V,
+ ) -> ControlFlow<V::BreakTy>;
+}
+
+impl<'tcx> ProofTreeInferCtxtExt<'tcx> for InferCtxt<'tcx> {
+ fn visit_proof_tree<V: ProofTreeVisitor<'tcx>>(
+ &self,
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ visitor: &mut V,
+ ) -> ControlFlow<V::BreakTy> {
+ let (_, proof_tree) =
+ self.evaluate_root_goal(goal, GenerateProofTree::Yes(UseGlobalCache::No));
+ let proof_tree = proof_tree.unwrap();
+ visitor.visit_goal(&InspectGoal::new(self, 0, &proof_tree))
+ }
+}
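
A minimal sketch of a visitor built on the API above; the visitor type and its stopping rule are made up for illustration, while `ProofTreeVisitor`, `InspectGoal`, and `visit_nested` are the items introduced in this file:

    use std::ops::ControlFlow;

    use rustc_middle::traits::solve::Certainty;

    use crate::solve::inspect::{InspectGoal, ProofTreeVisitor};

    /// Illustrative visitor: breaks at the first goal that holds with `Certainty::Yes`.
    struct FindProvenGoal;

    impl<'tcx> ProofTreeVisitor<'tcx> for FindProvenGoal {
        type BreakTy = ();

        fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy> {
            if matches!(goal.result(), Ok(Certainty::Yes)) {
                return ControlFlow::Break(());
            }
            for candidate in goal.candidates() {
                // Recurse into the candidate's nested goals.
                candidate.visit_nested(&mut *self)?;
            }
            ControlFlow::Continue(())
        }
    }

A caller would drive it through the `ProofTreeInferCtxtExt` extension above, e.g. `infcx.visit_proof_tree(goal, &mut FindProvenGoal)`.
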
diff --git a/compiler/rustc_trait_selection/src/solve/inspect/build.rs b/compiler/rustc_trait_selection/src/solve/inspect/build.rs
new file mode 100644
index 000000000..2eba98b02
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/inspect/build.rs
@@ -0,0 +1,522 @@
+//! Building proof trees incrementally during trait solving.
+//!
+//! This code is *a bit* of a mess and can hopefully be
+//! mostly ignored. For a general overview of how it works,
+//! see the comment on [ProofTreeBuilder].
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::traits::solve::{
+ CanonicalInput, Certainty, Goal, IsNormalizesToHack, QueryInput, QueryResult,
+};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::DumpSolverProofTree;
+
+use crate::solve::eval_ctxt::UseGlobalCache;
+use crate::solve::{self, inspect, EvalCtxt, GenerateProofTree};
+
+/// The core data structure when building proof trees.
+///
+/// In case the current evaluation does not generate a proof
+/// tree, `state` is simply `None` and we avoid any work.
+///
+/// The possible states of the solver are represented via
+/// variants of [DebugSolver]. For any nested computation we call
+/// `ProofTreeBuilder::new_nested_computation_kind` which
+/// creates a new `ProofTreeBuilder` to temporarily replace the
+/// current one. Once that nested computation is done,
+/// `ProofTreeBuilder::nested_computation_kind` is called
+/// to add the finished nested evaluation to the parent.
+///
+/// We provide additional information to the current state
+/// by calling methods such as `ProofTreeBuilder::probe_kind`.
+///
+/// The actual structure closely mirrors the finished proof
+/// trees. At the end of trait solving `ProofTreeBuilder::finalize`
+/// is called to recursively convert the whole structure to a
+/// finished proof tree.
+pub(in crate::solve) struct ProofTreeBuilder<'tcx> {
+ state: Option<Box<BuilderData<'tcx>>>,
+}
+
+struct BuilderData<'tcx> {
+ tree: DebugSolver<'tcx>,
+ use_global_cache: UseGlobalCache,
+}
+
+/// The current state of the proof tree builder. At most places
+/// in the code, only one or two variants are actually possible.
+///
+/// We simply ICE in case that assumption is broken.
+#[derive(Debug)]
+enum DebugSolver<'tcx> {
+ Root,
+ GoalEvaluation(WipGoalEvaluation<'tcx>),
+ CanonicalGoalEvaluation(WipCanonicalGoalEvaluation<'tcx>),
+ AddedGoalsEvaluation(WipAddedGoalsEvaluation<'tcx>),
+ GoalEvaluationStep(WipGoalEvaluationStep<'tcx>),
+ Probe(WipProbe<'tcx>),
+}
+
+impl<'tcx> From<WipGoalEvaluation<'tcx>> for DebugSolver<'tcx> {
+ fn from(g: WipGoalEvaluation<'tcx>) -> DebugSolver<'tcx> {
+ DebugSolver::GoalEvaluation(g)
+ }
+}
+
+impl<'tcx> From<WipCanonicalGoalEvaluation<'tcx>> for DebugSolver<'tcx> {
+ fn from(g: WipCanonicalGoalEvaluation<'tcx>) -> DebugSolver<'tcx> {
+ DebugSolver::CanonicalGoalEvaluation(g)
+ }
+}
+
+impl<'tcx> From<WipAddedGoalsEvaluation<'tcx>> for DebugSolver<'tcx> {
+ fn from(g: WipAddedGoalsEvaluation<'tcx>) -> DebugSolver<'tcx> {
+ DebugSolver::AddedGoalsEvaluation(g)
+ }
+}
+
+impl<'tcx> From<WipGoalEvaluationStep<'tcx>> for DebugSolver<'tcx> {
+ fn from(g: WipGoalEvaluationStep<'tcx>) -> DebugSolver<'tcx> {
+ DebugSolver::GoalEvaluationStep(g)
+ }
+}
+
+impl<'tcx> From<WipProbe<'tcx>> for DebugSolver<'tcx> {
+ fn from(p: WipProbe<'tcx>) -> DebugSolver<'tcx> {
+ DebugSolver::Probe(p)
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+struct WipGoalEvaluation<'tcx> {
+ pub uncanonicalized_goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ pub kind: WipGoalEvaluationKind<'tcx>,
+ pub evaluation: Option<WipCanonicalGoalEvaluation<'tcx>>,
+ pub returned_goals: Vec<Goal<'tcx, ty::Predicate<'tcx>>>,
+}
+
+impl<'tcx> WipGoalEvaluation<'tcx> {
+ fn finalize(self) -> inspect::GoalEvaluation<'tcx> {
+ inspect::GoalEvaluation {
+ uncanonicalized_goal: self.uncanonicalized_goal,
+ kind: match self.kind {
+ WipGoalEvaluationKind::Root { orig_values } => {
+ inspect::GoalEvaluationKind::Root { orig_values }
+ }
+ WipGoalEvaluationKind::Nested { is_normalizes_to_hack } => {
+ inspect::GoalEvaluationKind::Nested { is_normalizes_to_hack }
+ }
+ },
+ evaluation: self.evaluation.unwrap().finalize(),
+ returned_goals: self.returned_goals,
+ }
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+pub(in crate::solve) enum WipGoalEvaluationKind<'tcx> {
+ Root { orig_values: Vec<ty::GenericArg<'tcx>> },
+ Nested { is_normalizes_to_hack: IsNormalizesToHack },
+}
+
+#[derive(Eq, PartialEq, Debug)]
+pub(in crate::solve) enum WipCanonicalGoalEvaluationKind {
+ Overflow,
+ CacheHit(inspect::CacheHit),
+}
+
+#[derive(Eq, PartialEq, Debug)]
+struct WipCanonicalGoalEvaluation<'tcx> {
+ goal: CanonicalInput<'tcx>,
+ kind: Option<WipCanonicalGoalEvaluationKind>,
+ revisions: Vec<WipGoalEvaluationStep<'tcx>>,
+ result: Option<QueryResult<'tcx>>,
+}
+
+impl<'tcx> WipCanonicalGoalEvaluation<'tcx> {
+ fn finalize(self) -> inspect::CanonicalGoalEvaluation<'tcx> {
+ let kind = match self.kind {
+ Some(WipCanonicalGoalEvaluationKind::Overflow) => {
+ inspect::CanonicalGoalEvaluationKind::Overflow
+ }
+ Some(WipCanonicalGoalEvaluationKind::CacheHit(hit)) => {
+ inspect::CanonicalGoalEvaluationKind::CacheHit(hit)
+ }
+ None => inspect::CanonicalGoalEvaluationKind::Uncached {
+ revisions: self
+ .revisions
+ .into_iter()
+ .map(WipGoalEvaluationStep::finalize)
+ .collect(),
+ },
+ };
+
+ inspect::CanonicalGoalEvaluation { goal: self.goal, kind, result: self.result.unwrap() }
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+struct WipAddedGoalsEvaluation<'tcx> {
+ evaluations: Vec<Vec<WipGoalEvaluation<'tcx>>>,
+ result: Option<Result<Certainty, NoSolution>>,
+}
+
+impl<'tcx> WipAddedGoalsEvaluation<'tcx> {
+ fn finalize(self) -> inspect::AddedGoalsEvaluation<'tcx> {
+ inspect::AddedGoalsEvaluation {
+ evaluations: self
+ .evaluations
+ .into_iter()
+ .map(|evaluations| {
+ evaluations.into_iter().map(WipGoalEvaluation::finalize).collect()
+ })
+ .collect(),
+ result: self.result.unwrap(),
+ }
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+struct WipGoalEvaluationStep<'tcx> {
+ instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
+
+ evaluation: WipProbe<'tcx>,
+}
+
+impl<'tcx> WipGoalEvaluationStep<'tcx> {
+ fn finalize(self) -> inspect::GoalEvaluationStep<'tcx> {
+ let evaluation = self.evaluation.finalize();
+ match evaluation.kind {
+ inspect::ProbeKind::Root { .. } => (),
+ _ => unreachable!("unexpected root evaluation: {evaluation:?}"),
+ }
+ inspect::GoalEvaluationStep { instantiated_goal: self.instantiated_goal, evaluation }
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+struct WipProbe<'tcx> {
+ pub steps: Vec<WipProbeStep<'tcx>>,
+ pub kind: Option<inspect::ProbeKind<'tcx>>,
+}
+
+impl<'tcx> WipProbe<'tcx> {
+ fn finalize(self) -> inspect::Probe<'tcx> {
+ inspect::Probe {
+ steps: self.steps.into_iter().map(WipProbeStep::finalize).collect(),
+ kind: self.kind.unwrap(),
+ }
+ }
+}
+
+#[derive(Eq, PartialEq, Debug)]
+enum WipProbeStep<'tcx> {
+ AddGoal(inspect::CanonicalState<'tcx, Goal<'tcx, ty::Predicate<'tcx>>>),
+ EvaluateGoals(WipAddedGoalsEvaluation<'tcx>),
+ NestedProbe(WipProbe<'tcx>),
+}
+
+impl<'tcx> WipProbeStep<'tcx> {
+ fn finalize(self) -> inspect::ProbeStep<'tcx> {
+ match self {
+ WipProbeStep::AddGoal(goal) => inspect::ProbeStep::AddGoal(goal),
+ WipProbeStep::EvaluateGoals(eval) => inspect::ProbeStep::EvaluateGoals(eval.finalize()),
+ WipProbeStep::NestedProbe(probe) => inspect::ProbeStep::NestedProbe(probe.finalize()),
+ }
+ }
+}
+
+impl<'tcx> ProofTreeBuilder<'tcx> {
+ fn new(
+ state: impl Into<DebugSolver<'tcx>>,
+ use_global_cache: UseGlobalCache,
+ ) -> ProofTreeBuilder<'tcx> {
+ ProofTreeBuilder {
+ state: Some(Box::new(BuilderData { tree: state.into(), use_global_cache })),
+ }
+ }
+
+ fn nested<T: Into<DebugSolver<'tcx>>>(&self, state: impl FnOnce() -> T) -> Self {
+ match &self.state {
+ Some(prev_state) => Self {
+ state: Some(Box::new(BuilderData {
+ tree: state().into(),
+ use_global_cache: prev_state.use_global_cache,
+ })),
+ },
+ None => Self { state: None },
+ }
+ }
+
+ fn as_mut(&mut self) -> Option<&mut DebugSolver<'tcx>> {
+ self.state.as_mut().map(|boxed| &mut boxed.tree)
+ }
+
+ pub fn finalize(self) -> Option<inspect::GoalEvaluation<'tcx>> {
+ match self.state?.tree {
+ DebugSolver::GoalEvaluation(wip_goal_evaluation) => {
+ Some(wip_goal_evaluation.finalize())
+ }
+ root => unreachable!("unexpected proof tree builder root node: {:?}", root),
+ }
+ }
+
+ pub fn use_global_cache(&self) -> bool {
+ self.state
+ .as_ref()
+ .map(|state| matches!(state.use_global_cache, UseGlobalCache::Yes))
+ .unwrap_or(true)
+ }
+
+ pub fn new_maybe_root(
+ tcx: TyCtxt<'tcx>,
+ generate_proof_tree: GenerateProofTree,
+ ) -> ProofTreeBuilder<'tcx> {
+ match generate_proof_tree {
+ GenerateProofTree::Never => ProofTreeBuilder::new_noop(),
+ GenerateProofTree::IfEnabled => {
+ let opts = &tcx.sess.opts.unstable_opts;
+ match opts.dump_solver_proof_tree {
+ DumpSolverProofTree::Always => {
+ let use_cache = opts.dump_solver_proof_tree_use_cache.unwrap_or(true);
+ ProofTreeBuilder::new_root(UseGlobalCache::from_bool(use_cache))
+ }
+ // `OnError` is handled by reevaluating goals in error
+ // reporting with `GenerateProofTree::Yes`.
+ DumpSolverProofTree::OnError | DumpSolverProofTree::Never => {
+ ProofTreeBuilder::new_noop()
+ }
+ }
+ }
+ GenerateProofTree::Yes(use_cache) => ProofTreeBuilder::new_root(use_cache),
+ }
+ }
+
+ pub fn new_root(use_global_cache: UseGlobalCache) -> ProofTreeBuilder<'tcx> {
+ ProofTreeBuilder::new(DebugSolver::Root, use_global_cache)
+ }
+
+ pub fn new_noop() -> ProofTreeBuilder<'tcx> {
+ ProofTreeBuilder { state: None }
+ }
+
+ pub fn is_noop(&self) -> bool {
+ self.state.is_none()
+ }
+
+ pub(in crate::solve) fn new_goal_evaluation(
+ &mut self,
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ orig_values: &[ty::GenericArg<'tcx>],
+ kind: solve::GoalEvaluationKind,
+ ) -> ProofTreeBuilder<'tcx> {
+ self.nested(|| WipGoalEvaluation {
+ uncanonicalized_goal: goal,
+ kind: match kind {
+ solve::GoalEvaluationKind::Root => {
+ WipGoalEvaluationKind::Root { orig_values: orig_values.to_vec() }
+ }
+ solve::GoalEvaluationKind::Nested { is_normalizes_to_hack } => {
+ WipGoalEvaluationKind::Nested { is_normalizes_to_hack }
+ }
+ },
+ evaluation: None,
+ returned_goals: vec![],
+ })
+ }
+
+ pub fn new_canonical_goal_evaluation(
+ &mut self,
+ goal: CanonicalInput<'tcx>,
+ ) -> ProofTreeBuilder<'tcx> {
+ self.nested(|| WipCanonicalGoalEvaluation {
+ goal,
+ kind: None,
+ revisions: vec![],
+ result: None,
+ })
+ }
+
+ pub fn canonical_goal_evaluation(&mut self, canonical_goal_evaluation: ProofTreeBuilder<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match (this, canonical_goal_evaluation.state.unwrap().tree) {
+ (
+ DebugSolver::GoalEvaluation(goal_evaluation),
+ DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation),
+ ) => goal_evaluation.evaluation = Some(canonical_goal_evaluation),
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn goal_evaluation_kind(&mut self, kind: WipCanonicalGoalEvaluationKind) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => {
+ assert_eq!(canonical_goal_evaluation.kind.replace(kind), None);
+ }
+ _ => unreachable!(),
+ };
+ }
+ }
+
+ pub fn returned_goals(&mut self, goals: &[Goal<'tcx, ty::Predicate<'tcx>>]) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::GoalEvaluation(evaluation) => {
+ assert!(evaluation.returned_goals.is_empty());
+ evaluation.returned_goals.extend(goals);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+ pub fn goal_evaluation(&mut self, goal_evaluation: ProofTreeBuilder<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match (this, goal_evaluation.state.unwrap().tree) {
+ (
+ DebugSolver::AddedGoalsEvaluation(WipAddedGoalsEvaluation {
+ evaluations, ..
+ }),
+ DebugSolver::GoalEvaluation(goal_evaluation),
+ ) => evaluations.last_mut().unwrap().push(goal_evaluation),
+ (this @ DebugSolver::Root, goal_evaluation) => *this = goal_evaluation,
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn new_goal_evaluation_step(
+ &mut self,
+ instantiated_goal: QueryInput<'tcx, ty::Predicate<'tcx>>,
+ ) -> ProofTreeBuilder<'tcx> {
+ self.nested(|| WipGoalEvaluationStep {
+ instantiated_goal,
+ evaluation: WipProbe { steps: vec![], kind: None },
+ })
+ }
+ pub fn goal_evaluation_step(&mut self, goal_evaluation_step: ProofTreeBuilder<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match (this, goal_evaluation_step.state.unwrap().tree) {
+ (
+ DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluations),
+ DebugSolver::GoalEvaluationStep(goal_evaluation_step),
+ ) => {
+ canonical_goal_evaluations.revisions.push(goal_evaluation_step);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn new_probe(&mut self) -> ProofTreeBuilder<'tcx> {
+ self.nested(|| WipProbe { steps: vec![], kind: None })
+ }
+
+ pub fn probe_kind(&mut self, probe_kind: inspect::ProbeKind<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::Probe(this) => {
+ assert_eq!(this.kind.replace(probe_kind), None)
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn add_goal(ecx: &mut EvalCtxt<'_, 'tcx>, goal: Goal<'tcx, ty::Predicate<'tcx>>) {
+ // Can't use `if let Some(this) = ecx.inspect.as_mut()` here because
+ // we have to immutably use the `EvalCtxt` for `make_canonical_state`.
+ if ecx.inspect.is_noop() {
+ return;
+ }
+
+ let goal = Self::make_canonical_state(ecx, goal);
+
+ match ecx.inspect.as_mut().unwrap() {
+ DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
+ evaluation: WipProbe { steps, .. },
+ ..
+ })
+ | DebugSolver::Probe(WipProbe { steps, .. }) => steps.push(WipProbeStep::AddGoal(goal)),
+ s => unreachable!("tried to add {goal:?} to {s:?}"),
+ }
+ }
+
+ pub fn finish_probe(&mut self, probe: ProofTreeBuilder<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match (this, probe.state.unwrap().tree) {
+ (
+ DebugSolver::Probe(WipProbe { steps, .. })
+ | DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
+ evaluation: WipProbe { steps, .. },
+ ..
+ }),
+ DebugSolver::Probe(probe),
+ ) => steps.push(WipProbeStep::NestedProbe(probe)),
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn new_evaluate_added_goals(&mut self) -> ProofTreeBuilder<'tcx> {
+ self.nested(|| WipAddedGoalsEvaluation { evaluations: vec![], result: None })
+ }
+
+ pub fn evaluate_added_goals_loop_start(&mut self) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::AddedGoalsEvaluation(this) => {
+ this.evaluations.push(vec![]);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn eval_added_goals_result(&mut self, result: Result<Certainty, NoSolution>) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::AddedGoalsEvaluation(this) => {
+ assert_eq!(this.result.replace(result), None);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn added_goals_evaluation(&mut self, added_goals_evaluation: ProofTreeBuilder<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match (this, added_goals_evaluation.state.unwrap().tree) {
+ (
+ DebugSolver::GoalEvaluationStep(WipGoalEvaluationStep {
+ evaluation: WipProbe { steps, .. },
+ ..
+ })
+ | DebugSolver::Probe(WipProbe { steps, .. }),
+ DebugSolver::AddedGoalsEvaluation(added_goals_evaluation),
+ ) => steps.push(WipProbeStep::EvaluateGoals(added_goals_evaluation)),
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ pub fn query_result(&mut self, result: QueryResult<'tcx>) {
+ if let Some(this) = self.as_mut() {
+ match this {
+ DebugSolver::CanonicalGoalEvaluation(canonical_goal_evaluation) => {
+ assert_eq!(canonical_goal_evaluation.result.replace(result), None);
+ }
+ DebugSolver::GoalEvaluationStep(evaluation_step) => {
+ assert_eq!(
+ evaluation_step
+ .evaluation
+ .kind
+ .replace(inspect::ProbeKind::Root { result }),
+ None
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+}
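
The builder methods above rely on a small write-once idiom: `Option::replace` returns the previous value, so `assert_eq!(field.replace(value), None)` guarantees a field is only ever set once. A minimal standalone sketch of that idiom, using only the standard library (the `set_once` helper is a made-up name, not solver API):

fn set_once<T: std::fmt::Debug>(slot: &mut Option<T>, value: T) {
    // `replace` hands back the old value; it must still be `None`.
    let previous = slot.replace(value);
    assert!(previous.is_none(), "field was already set: {previous:?}");
}

fn main() {
    let mut kind: Option<&str> = None;
    set_once(&mut kind, "root");
    assert_eq!(kind, Some("root"));
}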
diff --git a/compiler/rustc_trait_selection/src/solve/inspect/mod.rs b/compiler/rustc_trait_selection/src/solve/inspect/mod.rs
new file mode 100644
index 000000000..60d52305a
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/solve/inspect/mod.rs
@@ -0,0 +1,7 @@
+pub use rustc_middle::traits::solve::inspect::*;
+
+mod build;
+pub(in crate::solve) use build::*;
+
+mod analyse;
+pub use analyse::*;
diff --git a/compiler/rustc_trait_selection/src/solve/mod.rs b/compiler/rustc_trait_selection/src/solve/mod.rs
index 75a99f799..77a3b5e12 100644
--- a/compiler/rustc_trait_selection/src/solve/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/mod.rs
@@ -19,7 +19,8 @@ use rustc_infer::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_infer::traits::query::NoSolution;
use rustc_middle::infer::canonical::CanonicalVarInfos;
use rustc_middle::traits::solve::{
- CanonicalResponse, Certainty, ExternalConstraintsData, Goal, QueryResult, Response,
+ CanonicalResponse, Certainty, ExternalConstraintsData, Goal, IsNormalizesToHack, QueryResult,
+ Response,
};
use rustc_middle::ty::{self, Ty, TyCtxt, UniverseIndex};
use rustc_middle::ty::{
@@ -59,6 +60,12 @@ enum SolverMode {
Coherence,
}
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum GoalEvaluationKind {
+ Root,
+ Nested { is_normalizes_to_hack: IsNormalizesToHack },
+}
+
trait CanonicalResponseExt {
fn has_no_inference_or_external_constraints(&self) -> bool;
@@ -228,14 +235,15 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
#[instrument(level = "debug", skip(self))]
fn add_goal(&mut self, goal: Goal<'tcx, ty::Predicate<'tcx>>) {
+ inspect::ProofTreeBuilder::add_goal(self, goal);
self.nested_goals.goals.push(goal);
}
#[instrument(level = "debug", skip(self, goals))]
fn add_goals(&mut self, goals: impl IntoIterator<Item = Goal<'tcx, ty::Predicate<'tcx>>>) {
- let current_len = self.nested_goals.goals.len();
- self.nested_goals.goals.extend(goals);
- debug!("added_goals={:?}", &self.nested_goals.goals[current_len..]);
+ for goal in goals {
+ self.add_goal(goal);
+ }
}
/// Try to merge multiple possible ways to prove a goal, if that is not possible returns `None`.
diff --git a/compiler/rustc_trait_selection/src/solve/project_goals.rs b/compiler/rustc_trait_selection/src/solve/project_goals.rs
index e47e22877..0f9d36342 100644
--- a/compiler/rustc_trait_selection/src/solve/project_goals.rs
+++ b/compiler/rustc_trait_selection/src/solve/project_goals.rs
@@ -8,8 +8,9 @@ use rustc_hir::LangItem;
use rustc_infer::traits::query::NoSolution;
use rustc_infer::traits::specialization_graph::LeafDef;
use rustc_infer::traits::Reveal;
-use rustc_middle::traits::solve::inspect::CandidateKind;
-use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
+use rustc_middle::traits::solve::{
+ CandidateSource, CanonicalResponse, Certainty, Goal, QueryResult,
+};
use rustc_middle::traits::BuiltinImplSource;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
use rustc_middle::ty::ProjectionPredicate;
@@ -58,7 +59,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
}
DefKind::AnonConst => self.normalize_anon_const(goal),
DefKind::OpaqueTy => self.normalize_opaque_type(goal),
- DefKind::TyAlias { .. } => self.normalize_weak_type(goal),
+ DefKind::TyAlias => self.normalize_weak_type(goal),
kind => bug!("unknown DefKind {} in projection goal: {goal:#?}", kind.descr(def_id)),
}
}
@@ -113,7 +114,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
if let Some(projection_pred) = assumption.as_projection_clause() {
if projection_pred.projection_def_id() == goal.predicate.def_id() {
let tcx = ecx.tcx();
- ecx.probe_candidate("assumption").enter(|ecx| {
+ ecx.probe_misc_candidate("assumption").enter(|ecx| {
let assumption_projection_pred =
ecx.instantiate_binder_with_infer(projection_pred);
ecx.eq(
@@ -155,7 +156,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
return Err(NoSolution);
}
- ecx.probe(|r| CandidateKind::Candidate { name: "impl".into(), result: *r }).enter(|ecx| {
+ ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
@@ -244,7 +245,21 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
// Finally we construct the actual value of the associated type.
let term = match assoc_def.item.kind {
ty::AssocKind::Type => tcx.type_of(assoc_def.item.def_id).map_bound(|ty| ty.into()),
- ty::AssocKind::Const => bug!("associated const projection is not supported yet"),
+ ty::AssocKind::Const => {
+ if tcx.features().associated_const_equality {
+ bug!("associated const projection is not supported yet")
+ } else {
+ ty::EarlyBinder::bind(
+ ty::Const::new_error_with_message(
+ tcx,
+ tcx.type_of(assoc_def.item.def_id).instantiate_identity(),
+ DUMMY_SP,
+ "associated const projection is not supported yet",
+ )
+ .into(),
+ )
+ }
+ }
ty::AssocKind::Fn => unreachable!("we should never project to a fn"),
};
@@ -331,13 +346,15 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
ty::TraitRef::from_lang_item(tcx, LangItem::Sized, DUMMY_SP, [output])
});
- let pred = tupled_inputs_and_output
- .map_bound(|(inputs, output)| ty::ProjectionPredicate {
+ let pred = ty::Clause::from_projection_clause(
+ tcx,
+ tupled_inputs_and_output.map_bound(|(inputs, output)| ty::ProjectionPredicate {
projection_ty: tcx
.mk_alias_ty(goal.predicate.def_id(), [goal.predicate.self_ty(), inputs]),
term: output.into(),
- })
- .to_predicate(tcx);
+ }),
+ );
+
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::consider_implied_clause(ecx, goal, pred, [goal.with(tcx, output_is_sized_pred)])
@@ -355,7 +372,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
let tcx = ecx.tcx();
- ecx.probe_candidate("builtin pointee").enter(|ecx| {
+ ecx.probe_misc_candidate("builtin pointee").enter(|ecx| {
let metadata_ty = match goal.predicate.self_ty().kind() {
ty::Bool
| ty::Char
@@ -371,7 +388,6 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Foreign(..) => tcx.types.unit,
@@ -539,7 +555,6 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
| ty::Infer(ty::IntVar(..) | ty::FloatVar(..))
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Foreign(..)
| ty::Adt(_, _)
@@ -564,7 +579,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for ProjectionPredicate<'tcx> {
),
};
- ecx.probe_candidate("builtin discriminant kind").enter(|ecx| {
+ ecx.probe_misc_candidate("builtin discriminant kind").enter(|ecx| {
ecx.eq(goal.param_env, goal.predicate.term, discriminant_ty.into())
.expect("expected goal term to be fully unconstrained");
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
deleted file mode 100644
index be48447e2..000000000
--- a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-//! This module both handles the global cache which stores "finished" goals,
-//! and the provisional cache which contains partially computed goals.
-//!
-//! The provisional cache is necessary when dealing with coinductive cycles.
-//!
-//! For more information about the provisional cache and coinduction in general,
-//! check out the relevant section of the rustc-dev-guide.
-//!
-//! FIXME(@lcnr): Write that section, feel free to ping me if you need help here
-//! before then or if I still haven't done that before January 2023.
-use super::StackDepth;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_index::IndexVec;
-use rustc_middle::traits::solve::{CanonicalInput, QueryResult};
-
-rustc_index::newtype_index! {
- pub struct EntryIndex {}
-}
-
-#[derive(Debug, Clone)]
-pub(super) struct ProvisionalEntry<'tcx> {
- /// In case we have a coinductive cycle, this is the
- /// the current provisional result of this goal.
- ///
- /// This starts out as `None` for all goals and gets to some
- /// when the goal gets popped from the stack or we rerun evaluation
- /// for this goal to reach a fixpoint.
- pub(super) response: Option<QueryResult<'tcx>>,
- /// In case of a cycle, the position of deepest stack entry involved
- /// in that cycle. This is monotonically decreasing in the stack as all
- /// elements between the current stack element in the deepest stack entry
- /// involved have to also be involved in that cycle.
- ///
- /// We can only move entries to the global cache once we're complete done
- /// with the cycle. If this entry has not been involved in a cycle,
- /// this is just its own depth.
- pub(super) depth: StackDepth,
-
- /// The goal for this entry. Should always be equal to the corresponding goal
- /// in the lookup table.
- pub(super) input: CanonicalInput<'tcx>,
-}
-
-pub(super) struct ProvisionalCache<'tcx> {
- pub(super) entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>,
- // FIXME: This is only used to quickly check whether a given goal
- // is in the cache. We should experiment with using something like
- // `SsoHashSet` here because in most cases there are only a few entries.
- pub(super) lookup_table: FxHashMap<CanonicalInput<'tcx>, EntryIndex>,
-}
-
-impl<'tcx> ProvisionalCache<'tcx> {
- pub(super) fn empty() -> ProvisionalCache<'tcx> {
- ProvisionalCache { entries: Default::default(), lookup_table: Default::default() }
- }
-
- pub(super) fn is_empty(&self) -> bool {
- self.entries.is_empty() && self.lookup_table.is_empty()
- }
-
- /// Adds a dependency from the current leaf to `target` in the cache
- /// to prevent us from moving any goals which depend on the current leaf
- /// to the global cache while we're still computing `target`.
- ///
- /// Its important to note that `target` may already be part of a different cycle.
- /// In this case we have to ensure that we also depend on all other goals
- /// in the existing cycle in addition to the potentially direct cycle with `target`.
- pub(super) fn add_dependency_of_leaf_on(&mut self, target: EntryIndex) {
- let depth = self.entries[target].depth;
- for provisional_entry in &mut self.entries.raw[target.index()..] {
- // The depth of `target` is the position of the deepest goal in the stack
- // on which `target` depends. That goal is the `root` of this cycle.
- //
- // Any entry which was added after `target` is either on the stack itself
- // at which point its depth is definitely at least as high as the depth of
- // `root`. If it's not on the stack itself it has to depend on a goal
- // between `root` and `leaf`. If it were to depend on a goal deeper in the
- // stack than `root`, then `root` would also depend on that goal, at which
- // point `root` wouldn't be the root anymore.
- debug_assert!(provisional_entry.depth >= depth);
- provisional_entry.depth = depth;
- }
-
- // We only update entries which were added after `target` as no other
- // entry should have a higher depth.
- //
- // Any entry which previously had a higher depth than target has to
- // be between `target` and `root`. Because of this we would have updated
- // its depth when calling `add_dependency_of_leaf_on(root)` for `target`.
- if cfg!(debug_assertions) {
- self.entries.iter().all(|e| e.depth <= depth);
- }
- }
-
- pub(super) fn depth(&self, entry_index: EntryIndex) -> StackDepth {
- self.entries[entry_index].depth
- }
-
- pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> Option<QueryResult<'tcx>> {
- self.entries[entry_index].response
- }
-}
diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
index 49ebfa4e6..33513f6bd 100644
--- a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
+++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs
@@ -1,13 +1,11 @@
-mod cache;
-
-use self::cache::ProvisionalEntry;
+use super::inspect;
use super::inspect::ProofTreeBuilder;
use super::SolverMode;
-use cache::ProvisionalCache;
+use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::fx::FxHashSet;
use rustc_index::Idx;
use rustc_index::IndexVec;
-use rustc_middle::dep_graph::DepKind;
+use rustc_middle::dep_graph::dep_kinds;
use rustc_middle::traits::solve::inspect::CacheHit;
use rustc_middle::traits::solve::CacheData;
use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
@@ -26,8 +24,14 @@ struct StackEntry<'tcx> {
// The maximum depth reached by this stack entry, only up-to date
// for the top of the stack and lazily updated for the rest.
reached_depth: StackDepth,
+ // In case of a cycle, the depth of the root.
+ cycle_root_depth: StackDepth,
+
encountered_overflow: bool,
has_been_used: bool,
+ /// Starts out as `None` and gets set when rerunning this
+ /// goal in case we encounter a cycle.
+ provisional_result: Option<QueryResult<'tcx>>,
/// We put only the root goal of a coinductive cycle into the global cache.
///
@@ -46,16 +50,16 @@ pub(super) struct SearchGraph<'tcx> {
///
/// An element is *deeper* in the stack if its index is *lower*.
stack: IndexVec<StackDepth, StackEntry<'tcx>>,
- provisional_cache: ProvisionalCache<'tcx>,
+ stack_entries: FxHashMap<CanonicalInput<'tcx>, StackDepth>,
}
impl<'tcx> SearchGraph<'tcx> {
pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> {
Self {
mode,
- local_overflow_limit: tcx.recursion_limit().0.ilog2() as usize,
+ local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize,
stack: Default::default(),
- provisional_cache: ProvisionalCache::empty(),
+ stack_entries: Default::default(),
}
}
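
The change from `ilog2()` to `checked_ilog2().unwrap_or(0)` above avoids a panic when the recursion limit is zero: `ilog2()` panics on 0, while `checked_ilog2()` returns `None`. A tiny self-contained illustration (the `limit_from` helper is hypothetical; only the std methods are real):

fn limit_from(recursion_limit: usize) -> usize {
    // `checked_ilog2` returns `None` for 0, where plain `ilog2()` would panic.
    recursion_limit.checked_ilog2().unwrap_or(0) as usize
}

fn main() {
    assert_eq!(limit_from(128), 7);
    assert_eq!(limit_from(0), 0);
}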
@@ -84,6 +88,7 @@ impl<'tcx> SearchGraph<'tcx> {
/// would cause us to not track overflow and recursion depth correctly.
fn pop_stack(&mut self) -> StackEntry<'tcx> {
let elem = self.stack.pop().unwrap();
+ assert!(self.stack_entries.remove(&elem.input).is_some());
if let Some(last) = self.stack.raw.last_mut() {
last.reached_depth = last.reached_depth.max(elem.reached_depth);
last.encountered_overflow |= elem.encountered_overflow;
@@ -103,22 +108,17 @@ impl<'tcx> SearchGraph<'tcx> {
}
pub(super) fn is_empty(&self) -> bool {
- self.stack.is_empty() && self.provisional_cache.is_empty()
+ self.stack.is_empty()
}
/// Whether we're currently in a cycle. This should only be used
/// for debug assertions.
pub(super) fn in_cycle(&self) -> bool {
if let Some(stack_depth) = self.stack.last_index() {
- // Either the current goal on the stack is the root of a cycle...
- if self.stack[stack_depth].has_been_used {
- return true;
- }
-
- // ...or it depends on a goal with a lower depth.
- let current_goal = self.stack[stack_depth].input;
- let entry_index = self.provisional_cache.lookup_table[&current_goal];
- self.provisional_cache.entries[entry_index].depth != stack_depth
+ // Either the current goal on the stack is the root of a cycle
+ // or it depends on a goal with a lower depth.
+ self.stack[stack_depth].has_been_used
+ || self.stack[stack_depth].cycle_root_depth != stack_depth
} else {
false
}
@@ -185,6 +185,8 @@ impl<'tcx> SearchGraph<'tcx> {
if let Some(last) = self.stack.raw.last_mut() {
last.encountered_overflow = true;
}
+
+ inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow);
return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
};
@@ -200,14 +202,16 @@ impl<'tcx> SearchGraph<'tcx> {
available_depth,
)
{
+ inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::CacheHit(
+ CacheHit::Global,
+ ));
self.on_cache_hit(reached_depth, encountered_overflow);
return result;
}
}
- // Look at the provisional cache to detect cycles.
- let cache = &mut self.provisional_cache;
- match cache.lookup_table.entry(input) {
+ // Check whether we're in a cycle.
+ match self.stack_entries.entry(input) {
// No entry, we push this goal on the stack and try to prove it.
Entry::Vacant(v) => {
let depth = self.stack.next_index();
@@ -215,14 +219,14 @@ impl<'tcx> SearchGraph<'tcx> {
input,
available_depth,
reached_depth: depth,
+ cycle_root_depth: depth,
encountered_overflow: false,
has_been_used: false,
+ provisional_result: None,
cycle_participants: Default::default(),
};
assert_eq!(self.stack.push(entry), depth);
- let entry_index =
- cache.entries.push(ProvisionalEntry { response: None, depth, input });
- v.insert(entry_index);
+ v.insert(depth);
}
// We have a nested goal which relies on a goal `root` deeper in the stack.
//
@@ -233,39 +237,50 @@ impl<'tcx> SearchGraph<'tcx> {
//
// Finally we can return either the provisional response for that goal if we have a
// coinductive cycle or an ambiguous result if the cycle is inductive.
- Entry::Occupied(entry_index) => {
- inspect.cache_hit(CacheHit::Provisional);
+ Entry::Occupied(entry) => {
+ inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::CacheHit(
+ CacheHit::Provisional,
+ ));
- let entry_index = *entry_index.get();
- let stack_depth = cache.depth(entry_index);
+ let stack_depth = *entry.get();
debug!("encountered cycle with depth {stack_depth:?}");
-
- cache.add_dependency_of_leaf_on(entry_index);
- let mut iter = self.stack.iter_mut();
- let root = iter.nth(stack_depth.as_usize()).unwrap();
- for e in iter {
- root.cycle_participants.insert(e.input);
+                // We start by updating the root depth of all cycle participants and
+                // adding all cycle participants to the root.
+ let root_depth = self.stack[stack_depth].cycle_root_depth;
+ let (prev, participants) = self.stack.raw.split_at_mut(stack_depth.as_usize() + 1);
+ let root = &mut prev[root_depth.as_usize()];
+ for entry in participants {
+ debug_assert!(entry.cycle_root_depth >= root_depth);
+ entry.cycle_root_depth = root_depth;
+ root.cycle_participants.insert(entry.input);
+ // FIXME(@lcnr): I believe that this line is needed as we could
+ // otherwise access a cache entry for the root of a cycle while
+ // computing the result for a cycle participant. This can result
+ // in unstable results due to incompleteness.
+ //
+ // However, a test for this would be an even more complex version of
+ // tests/ui/traits/new-solver/coinduction/incompleteness-unstable-result.rs.
+ // I did not bother to write such a test and we have no regression test
+ // for this. It would be good to have such a test :)
+ #[allow(rustc::potential_query_instability)]
+ root.cycle_participants.extend(entry.cycle_participants.drain());
}
- // If we're in a cycle, we have to retry proving the current goal
- // until we reach a fixpoint.
+ // If we're in a cycle, we have to retry proving the cycle head
+ // until we reach a fixpoint. It is not enough to simply retry the
+ // `root` goal of this cycle.
+ //
+ // See tests/ui/traits/new-solver/cycles/fixpoint-rerun-all-cycle-heads.rs
+ // for an example.
self.stack[stack_depth].has_been_used = true;
- return if let Some(result) = cache.provisional_result(entry_index) {
+ return if let Some(result) = self.stack[stack_depth].provisional_result {
result
} else {
- // If we don't have a provisional result yet, the goal has to
- // still be on the stack.
- let mut goal_on_stack = false;
- let mut is_coinductive = true;
- for entry in self.stack.raw[stack_depth.index()..]
+                    // If we don't have a provisional result yet, we're in the first iteration,
+                    // so we start with no constraints.
+ let is_coinductive = self.stack.raw[stack_depth.index()..]
.iter()
- .skip_while(|entry| entry.input != input)
- {
- goal_on_stack = true;
- is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx);
- }
- debug_assert!(goal_on_stack);
-
+ .all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx));
if is_coinductive {
Self::response_no_constraints(tcx, input, Certainty::Yes)
} else {
@@ -279,47 +294,32 @@ impl<'tcx> SearchGraph<'tcx> {
// Everything that affects the `result` should be performed within this
// `with_anon_task` closure.
let ((final_entry, result), dep_node) =
- tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
+ tcx.dep_graph.with_anon_task(tcx, dep_kinds::TraitSelect, || {
// When we encounter a coinductive cycle, we have to fetch the
// result of that cycle while we are still computing it. Because
// of this we continuously recompute the cycle until the result
// of the previous iteration is equal to the final result, at which
// point we are done.
for _ in 0..self.local_overflow_limit() {
- let response = prove_goal(self, inspect);
+ let result = prove_goal(self, inspect);
// Check whether the current goal is the root of a cycle and whether
// we have to rerun because its provisional result differed from the
// final result.
- //
- // Also update the response for this goal stored in the provisional
- // cache.
let stack_entry = self.pop_stack();
debug_assert_eq!(stack_entry.input, input);
- let cache = &mut self.provisional_cache;
- let provisional_entry_index =
- *cache.lookup_table.get(&stack_entry.input).unwrap();
- let provisional_entry = &mut cache.entries[provisional_entry_index];
if stack_entry.has_been_used
- && provisional_entry.response.map_or(true, |r| r != response)
+ && stack_entry.provisional_result.map_or(true, |r| r != result)
{
- // If so, update the provisional result for this goal and remove
- // all entries whose result depends on this goal from the provisional
- // cache...
- //
- // That's not completely correct, as a nested goal can also only
- // depend on a goal which is lower in the stack so it doesn't
- // actually depend on the current goal. This should be fairly
- // rare and is hopefully not relevant for performance.
- provisional_entry.response = Some(response);
- #[allow(rustc::potential_query_instability)]
- cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
- cache.entries.truncate(provisional_entry_index.index() + 1);
-
- // ...and finally push our goal back on the stack and reevaluate it.
- self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
+ // If so, update its provisional result and reevaluate it.
+ let depth = self.stack.push(StackEntry {
+ has_been_used: false,
+ provisional_result: Some(result),
+ ..stack_entry
+ });
+ assert_eq!(self.stack_entries.insert(input, depth), None);
} else {
- return (stack_entry, response);
+ return (stack_entry, result);
}
}
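
The loop above re-proves a cycle head until its provisional result stops changing, giving up once the local overflow limit is reached. A self-contained sketch of that fixpoint scheme (made-up names, no solver types, and it ignores the `has_been_used` check that restricts re-runs to actual cycle heads):

fn fixpoint<R: Copy + PartialEq>(
    limit: usize,
    mut evaluate: impl FnMut(Option<R>) -> R,
) -> Option<R> {
    let mut provisional = None;
    for _ in 0..limit {
        let result = evaluate(provisional);
        if provisional == Some(result) {
            // The previous provisional result equals the new one: done.
            return Some(result);
        }
        // Otherwise record the new provisional result and re-evaluate.
        provisional = Some(result);
    }
    None // no fixpoint reached within the limit, treat as overflow
}

fn main() {
    // Converges after a few iterations: 1 -> 4 -> 7 -> 10 -> 10.
    let result = fixpoint(16, |prev: Option<u32>| prev.map_or(1, |p| (p + 3).min(10)));
    assert_eq!(result, Some(10));
}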
@@ -335,17 +335,7 @@ impl<'tcx> SearchGraph<'tcx> {
//
// It is not possible for any nested goal to depend on something deeper on the
// stack, as this would have also updated the depth of the current goal.
- let cache = &mut self.provisional_cache;
- let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
- let provisional_entry = &mut cache.entries[provisional_entry_index];
- let depth = provisional_entry.depth;
- if depth == self.stack.next_index() {
- for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
- let actual_index = cache.lookup_table.remove(&entry.input);
- debug_assert_eq!(Some(i), actual_index);
- debug_assert!(entry.depth == depth);
- }
-
+ if final_entry.cycle_root_depth == self.stack.next_index() {
// When encountering a cycle, both inductive and coinductive, we only
// move the root into the global cache. We also store all other cycle
// participants involved.
@@ -363,8 +353,6 @@ impl<'tcx> SearchGraph<'tcx> {
dep_node,
result,
)
- } else {
- provisional_entry.response = Some(result);
}
result
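
Earlier in this file, the cycle-participant update uses `split_at_mut` to hold a mutable reference to the cycle root while iterating the entries pushed after it. A small standalone demonstration of that borrow-splitting trick on toy data (no solver types involved):

fn main() {
    let mut stack = vec![10, 20, 30, 40];
    let root_index = 1;

    // Split so `prev` holds entries up to and including the root and `rest`
    // holds everything pushed after it; both halves are distinct mutable
    // borrows, so no second mutable borrow of `stack` is needed.
    let (prev, rest) = stack.split_at_mut(root_index + 1);
    let root = &mut prev[root_index];
    for entry in rest {
        *root += *entry;
    }

    assert_eq!(stack, [10, 90, 30, 40]);
}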
diff --git a/compiler/rustc_trait_selection/src/solve/trait_goals.rs b/compiler/rustc_trait_selection/src/solve/trait_goals.rs
index 8685f3100..8055c63b9 100644
--- a/compiler/rustc_trait_selection/src/solve/trait_goals.rs
+++ b/compiler/rustc_trait_selection/src/solve/trait_goals.rs
@@ -5,7 +5,7 @@ use super::{EvalCtxt, SolverMode};
use rustc_hir::def_id::DefId;
use rustc_hir::{LangItem, Movability};
use rustc_infer::traits::query::NoSolution;
-use rustc_middle::traits::solve::inspect::CandidateKind;
+use rustc_middle::traits::solve::inspect::ProbeKind;
use rustc_middle::traits::solve::{CanonicalResponse, Certainty, Goal, QueryResult};
use rustc_middle::traits::{BuiltinImplSource, Reveal};
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams, TreatProjections};
@@ -61,7 +61,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
},
};
- ecx.probe_candidate("impl").enter(|ecx| {
+ ecx.probe_misc_candidate("impl").enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
@@ -96,7 +96,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
&& trait_clause.polarity() == goal.predicate.polarity
{
// FIXME: Constness
- ecx.probe_candidate("assumption").enter(|ecx| {
+ ecx.probe_misc_candidate("assumption").enter(|ecx| {
let assumption_trait_pred = ecx.instantiate_binder_with_infer(trait_clause);
ecx.eq(
goal.param_env,
@@ -167,7 +167,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
let tcx = ecx.tcx();
- ecx.probe_candidate("trait alias").enter(|ecx| {
+ ecx.probe_misc_candidate("trait alias").enter(|ecx| {
let nested_obligations = tcx
.predicates_of(goal.predicate.def_id())
.instantiate(tcx, goal.predicate.trait_ref.args);
@@ -427,7 +427,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
ecx: &mut EvalCtxt<'_, 'tcx>,
goal: Goal<'tcx, Self>,
) -> QueryResult<'tcx> {
- ecx.probe(|_| CandidateKind::UnsizeAssembly).enter(|ecx| {
+ ecx.probe(|_| ProbeKind::UnsizeAssembly).enter(|ecx| {
let a_ty = goal.predicate.self_ty();
// We need to normalize the b_ty since it's destructured as a `dyn Trait`.
let Some(b_ty) =
@@ -491,7 +491,7 @@ impl<'tcx> assembly::GoalKind<'tcx> for TraitPredicate<'tcx> {
Err(NoSolution) => vec![],
};
- ecx.probe(|_| CandidateKind::UnsizeAssembly).enter(|ecx| {
+ ecx.probe(|_| ProbeKind::UnsizeAssembly).enter(|ecx| {
let a_ty = goal.predicate.self_ty();
// We need to normalize the b_ty since it's matched structurally
// in the other functions below.
@@ -597,7 +597,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
self.walk_vtable(
a_principal.with_self_ty(tcx, a_ty),
|ecx, new_a_principal, _, vtable_vptr_slot| {
- if let Ok(resp) = ecx.probe_candidate("dyn upcast").enter(|ecx| {
+ if let Ok(resp) = ecx.probe_misc_candidate("dyn upcast").enter(|ecx| {
ecx.consider_builtin_upcast_to_principal(
goal,
a_data,
@@ -640,7 +640,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
target_projection: ty::PolyExistentialProjection<'tcx>| {
source_projection.item_def_id() == target_projection.item_def_id()
&& ecx
- .probe(|_| CandidateKind::UpcastProbe)
+ .probe(|_| ProbeKind::UpcastProjectionCompatibility)
.enter(|ecx| -> Result<(), NoSolution> {
ecx.eq(param_env, source_projection, target_projection)?;
let _ = ecx.try_evaluate_added_goals()?;
@@ -879,8 +879,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
| ty::FnPtr(_)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(_, _)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Adt(_, _)
@@ -918,7 +917,7 @@ impl<'tcx> EvalCtxt<'_, 'tcx> {
goal: Goal<'tcx, TraitPredicate<'tcx>>,
constituent_tys: impl Fn(&EvalCtxt<'_, 'tcx>, Ty<'tcx>) -> Result<Vec<Ty<'tcx>>, NoSolution>,
) -> QueryResult<'tcx> {
- self.probe_candidate("constituent tys").enter(|ecx| {
+ self.probe_misc_candidate("constituent tys").enter(|ecx| {
ecx.add_goals(
constituent_tys(ecx, goal.predicate.self_ty())?
.into_iter()
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
index ba5000da6..8096d7969 100644
--- a/compiler/rustc_trait_selection/src/traits/auto_trait.rs
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -793,7 +793,7 @@ impl<'tcx> AutoTraitFinder<'tcx> {
span: tcx.def_span(unevaluated.def),
unevaluated: unevaluated,
});
- Err(ErrorHandled::Reported(reported.into()))
+ Err(ErrorHandled::Reported(reported.into(), tcx.def_span(unevaluated.def)))
}
Err(err) => Err(err),
}
diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs
index 5746781ae..acab4498a 100644
--- a/compiler/rustc_trait_selection/src/traits/coherence.rs
+++ b/compiler/rustc_trait_selection/src/traits/coherence.rs
@@ -6,9 +6,15 @@
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::InferOk;
+use crate::solve::inspect;
+use crate::solve::inspect::{InspectGoal, ProofTreeInferCtxtExt, ProofTreeVisitor};
+use crate::traits::engine::TraitEngineExt;
use crate::traits::outlives_bounds::InferCtxtExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
use crate::traits::select::{IntercrateAmbiguityCause, TreatInductiveCycleAs};
+use crate::traits::structural_normalize::StructurallyNormalizeExt;
use crate::traits::util::impl_subject_and_oblig;
+use crate::traits::NormalizeExt;
use crate::traits::SkipLeakCheck;
use crate::traits::{
self, Obligation, ObligationCause, ObligationCtxt, PredicateObligation, PredicateObligations,
@@ -18,10 +24,13 @@ use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::{DefId, CRATE_DEF_ID, LOCAL_CRATE};
use rustc_infer::infer::{DefineOpaqueTypes, InferCtxt, TyCtxtInferExt};
-use rustc_infer::traits::util;
+use rustc_infer::traits::{util, TraitEngine};
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::traits::solve::{Certainty, Goal};
use rustc_middle::traits::specialization_graph::OverlapMode;
use rustc_middle::traits::DefiningAnchor;
use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
+use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::visit::{TypeVisitable, TypeVisitableExt};
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
use rustc_session::lint::builtin::COINDUCTIVE_OVERLAP_IN_COHERENCE;
@@ -31,9 +40,6 @@ use std::fmt::Debug;
use std::iter;
use std::ops::ControlFlow;
-use super::query::evaluate_obligation::InferCtxtExt;
-use super::NormalizeExt;
-
/// Whether we do the orphan check relative to this crate or
/// to some remote crate.
#[derive(Copy, Clone, Debug)]
@@ -152,16 +158,14 @@ fn with_fresh_ty_vars<'cx, 'tcx>(
.predicates_of(impl_def_id)
.instantiate(tcx, impl_args)
.iter()
- .map(|(c, s)| (c.as_predicate(), s))
+ .map(|(c, _)| c.as_predicate())
.collect(),
};
- let InferOk { value: mut header, obligations } = selcx
- .infcx
- .at(&ObligationCause::dummy_with_span(tcx.def_span(impl_def_id)), param_env)
- .normalize(header);
+ let InferOk { value: mut header, obligations } =
+ selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(header);
- header.predicates.extend(obligations.into_iter().map(|o| (o.predicate, o.cause.span)));
+ header.predicates.extend(obligations.into_iter().map(|o| o.predicate));
header
}
@@ -207,19 +211,19 @@ fn overlap<'tcx>(
// Equate the headers to find their intersection (the general type, with infer vars,
// that may apply both impls).
- let equate_obligations = equate_impl_headers(selcx.infcx, &impl1_header, &impl2_header)?;
+ let mut obligations = equate_impl_headers(selcx.infcx, &impl1_header, &impl2_header)?;
debug!("overlap: unification check succeeded");
+ obligations.extend(
+ [&impl1_header.predicates, &impl2_header.predicates].into_iter().flatten().map(
+ |&predicate| Obligation::new(infcx.tcx, ObligationCause::dummy(), param_env, predicate),
+ ),
+ );
+
if overlap_mode.use_implicit_negative() {
for mode in [TreatInductiveCycleAs::Ambig, TreatInductiveCycleAs::Recur] {
if let Some(failing_obligation) = selcx.with_treat_inductive_cycle_as(mode, |selcx| {
- impl_intersection_has_impossible_obligation(
- selcx,
- param_env,
- &impl1_header,
- &impl2_header,
- &equate_obligations,
- )
+ impl_intersection_has_impossible_obligation(selcx, &obligations)
}) {
if matches!(mode, TreatInductiveCycleAs::Recur) {
let first_local_impl = impl1_header
@@ -261,17 +265,11 @@ fn overlap<'tcx>(
infcx.tcx.def_span(impl2_header.impl_def_id),
"the second impl is here",
);
- if !failing_obligation.cause.span.is_dummy() {
- lint.span_label(
- failing_obligation.cause.span,
- format!(
- "`{}` may be considered to hold in future releases, \
- causing the impls to overlap",
- infcx
- .resolve_vars_if_possible(failing_obligation.predicate)
- ),
- );
- }
+ lint.note(format!(
+ "`{}` may be considered to hold in future releases, \
+ causing the impls to overlap",
+ infcx.resolve_vars_if_possible(failing_obligation.predicate)
+ ));
lint
},
);
@@ -289,7 +287,14 @@ fn overlap<'tcx>(
return None;
}
- let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes();
+ let intercrate_ambiguity_causes = if !overlap_mode.use_implicit_negative() {
+ Default::default()
+ } else if infcx.next_trait_solver() {
+ compute_intercrate_ambiguity_causes(&infcx, &obligations)
+ } else {
+ selcx.take_intercrate_ambiguity_causes()
+ };
+
debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes);
let involves_placeholder = infcx
.inner
@@ -343,34 +348,24 @@ fn equate_impl_headers<'tcx>(
/// of the two impls above to be empty.
///
/// Importantly, this works even if there isn't a `impl !Error for MyLocalType`.
-fn impl_intersection_has_impossible_obligation<'cx, 'tcx>(
+fn impl_intersection_has_impossible_obligation<'a, 'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- impl1_header: &ty::ImplHeader<'tcx>,
- impl2_header: &ty::ImplHeader<'tcx>,
- obligations: &PredicateObligations<'tcx>,
-) -> Option<PredicateObligation<'tcx>> {
+ obligations: &'a [PredicateObligation<'tcx>],
+) -> Option<&'a PredicateObligation<'tcx>> {
let infcx = selcx.infcx;
- [&impl1_header.predicates, &impl2_header.predicates]
- .into_iter()
- .flatten()
- .map(|&(predicate, span)| {
- Obligation::new(infcx.tcx, ObligationCause::dummy_with_span(span), param_env, predicate)
- })
- .chain(obligations.into_iter().cloned())
- .find(|obligation: &PredicateObligation<'tcx>| {
- if infcx.next_trait_solver() {
- infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
- } else {
- // We use `evaluate_root_obligation` to correctly track intercrate
- // ambiguity clauses. We cannot use this in the new solver.
- selcx.evaluate_root_obligation(obligation).map_or(
- false, // Overflow has occurred, and treat the obligation as possibly holding.
- |result| !result.may_apply(),
- )
- }
- })
+ obligations.iter().find(|obligation| {
+ if infcx.next_trait_solver() {
+ infcx.evaluate_obligation(obligation).map_or(false, |result| !result.may_apply())
+ } else {
+ // We use `evaluate_root_obligation` to correctly track intercrate
+ // ambiguity clauses. We cannot use this in the new solver.
+ selcx.evaluate_root_obligation(obligation).map_or(
+ false, // Overflow has occurred, and treat the obligation as possibly holding.
+ |result| !result.may_apply(),
+ )
+ }
+ })
}
/// Check if both impls can be satisfied by a common type by considering whether
@@ -832,9 +827,7 @@ where
// This should only be created when checking whether we have to check whether some
// auto trait impl applies. There will never be multiple impls, so we can just
// act as if it were a local type here.
- ty::GeneratorWitness(_) | ty::GeneratorWitnessMIR(..) => {
- ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
- }
+ ty::GeneratorWitness(..) => ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)),
ty::Alias(ty::Opaque, ..) => {
// This merits some explanation.
// Normally, opaque types are not involved when performing
@@ -890,3 +883,144 @@ where
ControlFlow::Continue(())
}
}
+
+/// Compute the `intercrate_ambiguity_causes` for the new solver using
+/// "proof trees".
+///
+/// This is a bit scuffed but seems to be good enough, at least
+/// when looking at UI tests. Given that it is only used to improve
+/// diagnostics, that is acceptable for now. We can always improve it
+/// once there are test cases where it is not enough.
+fn compute_intercrate_ambiguity_causes<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ obligations: &[PredicateObligation<'tcx>],
+) -> FxIndexSet<IntercrateAmbiguityCause> {
+ let mut causes: FxIndexSet<IntercrateAmbiguityCause> = Default::default();
+
+ for obligation in obligations {
+ search_ambiguity_causes(infcx, obligation.clone().into(), &mut causes);
+ }
+
+ causes
+}
+
+struct AmbiguityCausesVisitor<'a> {
+ causes: &'a mut FxIndexSet<IntercrateAmbiguityCause>,
+}
+
+impl<'a, 'tcx> ProofTreeVisitor<'tcx> for AmbiguityCausesVisitor<'a> {
+ type BreakTy = !;
+ fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> ControlFlow<Self::BreakTy> {
+ let infcx = goal.infcx();
+ for cand in goal.candidates() {
+ cand.visit_nested(self)?;
+ }
+ // When searching for intercrate ambiguity causes, we only need to look
+ // at ambiguous goals, as for others the coherence unknowable candidate
+ // was irrelevant.
+ match goal.result() {
+ Ok(Certainty::Maybe(_)) => {}
+ Ok(Certainty::Yes) | Err(NoSolution) => return ControlFlow::Continue(()),
+ }
+
+ let Goal { param_env, predicate } = goal.goal();
+
+ // For bound predicates we simply call `infcx.replace_bound_vars_with_placeholders`
+ // and then prove the resulting predicate as a nested goal.
+ let trait_ref = match predicate.kind().no_bound_vars() {
+ Some(ty::PredicateKind::Clause(ty::ClauseKind::Trait(tr))) => tr.trait_ref,
+ Some(ty::PredicateKind::Clause(ty::ClauseKind::Projection(proj))) => {
+ proj.projection_ty.trait_ref(infcx.tcx)
+ }
+ _ => return ControlFlow::Continue(()),
+ };
+
+ let mut ambiguity_cause = None;
+ for cand in goal.candidates() {
+            // FIXME: boiiii, using string comparisons here sure is scuffed.
+ if let inspect::ProbeKind::MiscCandidate { name: "coherence unknowable", result: _ } =
+ cand.kind()
+ {
+ let lazily_normalize_ty = |ty: Ty<'tcx>| {
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx);
+ if matches!(ty.kind(), ty::Alias(..)) {
+ // FIXME(-Ztrait-solver=next-coherence): we currently don't
+ // normalize opaque types here, resulting in diverging behavior
+ // for TAITs.
+ match infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .structurally_normalize(ty, &mut *fulfill_cx)
+ {
+ Ok(ty) => Ok(ty),
+ Err(_errs) => Err(()),
+ }
+ } else {
+ Ok(ty)
+ }
+ };
+
+ infcx.probe(|_| {
+ match trait_ref_is_knowable(infcx.tcx, trait_ref, lazily_normalize_ty) {
+ Err(()) => {}
+ Ok(Ok(())) => warn!("expected an unknowable trait ref: {trait_ref:?}"),
+ Ok(Err(conflict)) => {
+ if !trait_ref.references_error() {
+ let self_ty = trait_ref.self_ty();
+ let (trait_desc, self_desc) = with_no_trimmed_paths!({
+ let trait_desc = trait_ref.print_only_trait_path().to_string();
+ let self_desc = self_ty
+ .has_concrete_skeleton()
+ .then(|| self_ty.to_string());
+ (trait_desc, self_desc)
+ });
+ ambiguity_cause = Some(match conflict {
+ Conflict::Upstream => {
+ IntercrateAmbiguityCause::UpstreamCrateUpdate {
+ trait_desc,
+ self_desc,
+ }
+ }
+ Conflict::Downstream => {
+ IntercrateAmbiguityCause::DownstreamCrate {
+ trait_desc,
+ self_desc,
+ }
+ }
+ });
+ }
+ }
+ }
+ })
+ } else {
+ match cand.result() {
+ // We only add an ambiguity cause if the goal would otherwise
+ // result in an error.
+ //
+                        // FIXME: While this matches the behavior of the
+                        // old solver, it is not the only way in which the unknowable
+                        // candidates *weaken* coherence; they can also force otherwise
+                        // successful normalization to be ambiguous.
+ Ok(Certainty::Maybe(_) | Certainty::Yes) => {
+ ambiguity_cause = None;
+ break;
+ }
+ Err(NoSolution) => continue,
+ }
+ }
+ }
+
+ if let Some(ambiguity_cause) = ambiguity_cause {
+ self.causes.insert(ambiguity_cause);
+ }
+
+ ControlFlow::Continue(())
+ }
+}
+
+fn search_ambiguity_causes<'tcx>(
+ infcx: &InferCtxt<'tcx>,
+ goal: Goal<'tcx, ty::Predicate<'tcx>>,
+ causes: &mut FxIndexSet<IntercrateAmbiguityCause>,
+) {
+ infcx.visit_proof_tree(goal, &mut AmbiguityCausesVisitor { causes });
+}
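
The visitor above descends into nested candidates first and threads `ControlFlow` through the traversal so that any nested visit can stop the whole walk early. A generic sketch of that pattern over a toy tree (plain std only; the `Node` type and labels are invented):

use std::ops::ControlFlow;

struct Node {
    label: &'static str,
    children: Vec<Node>,
}

// Children are visited before the node itself; `?` propagates an early `Break`.
fn visit(node: &Node, seen: &mut Vec<&'static str>) -> ControlFlow<()> {
    for child in &node.children {
        visit(child, seen)?;
    }
    if node.label == "stop" {
        return ControlFlow::Break(());
    }
    seen.push(node.label);
    ControlFlow::Continue(())
}

fn main() {
    let tree = Node {
        label: "root",
        children: vec![
            Node { label: "a", children: vec![] },
            Node { label: "b", children: vec![] },
        ],
    };
    let mut seen = Vec::new();
    assert!(visit(&tree, &mut seen).is_continue());
    assert_eq!(seen, ["a", "b", "root"]);
}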
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
index 3d0d3812d..62ab1e104 100644
--- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -73,13 +73,13 @@ pub fn is_const_evaluatable<'tcx>(
ty::ConstKind::Unevaluated(uv) => {
let concrete = infcx.const_eval_resolve(param_env, uv, Some(span));
match concrete {
- Err(ErrorHandled::TooGeneric) => {
+ Err(ErrorHandled::TooGeneric(_)) => {
Err(NotConstEvaluatable::Error(infcx.tcx.sess.delay_span_bug(
span,
"Missing value for constant, but no error reported?",
)))
}
- Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e.into())),
+ Err(ErrorHandled::Reported(e, _)) => Err(NotConstEvaluatable::Error(e.into())),
Ok(_) => Ok(()),
}
}
@@ -132,7 +132,7 @@ pub fn is_const_evaluatable<'tcx>(
.emit()
}
- Err(ErrorHandled::TooGeneric) => {
+ Err(ErrorHandled::TooGeneric(_)) => {
let err = if uv.has_non_region_infer() {
NotConstEvaluatable::MentionsInfer
} else if uv.has_non_region_param() {
@@ -147,7 +147,7 @@ pub fn is_const_evaluatable<'tcx>(
Err(err)
}
- Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e.into())),
+ Err(ErrorHandled::Reported(e, _)) => Err(NotConstEvaluatable::Error(e.into())),
Ok(_) => Ok(()),
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/engine.rs b/compiler/rustc_trait_selection/src/traits/engine.rs
index 820973dc0..015e38b2a 100644
--- a/compiler/rustc_trait_selection/src/traits/engine.rs
+++ b/compiler/rustc_trait_selection/src/traits/engine.rs
@@ -23,6 +23,7 @@ use rustc_middle::traits::query::NoSolution;
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::ToPredicate;
use rustc_middle::ty::TypeFoldable;
+use rustc_middle::ty::Variance;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::TraitSolver;
@@ -156,6 +157,20 @@ impl<'a, 'tcx> ObligationCtxt<'a, 'tcx> {
.map(|infer_ok| self.register_infer_ok_obligations(infer_ok))
}
+ pub fn relate<T: ToTrace<'tcx>>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ variance: Variance,
+ expected: T,
+ actual: T,
+ ) -> Result<(), TypeError<'tcx>> {
+ self.infcx
+ .at(cause, param_env)
+ .relate(DefineOpaqueTypes::Yes, expected, variance, actual)
+ .map(|infer_ok| self.register_infer_ok_obligations(infer_ok))
+ }
+
/// Checks whether `expected` is a supertype of `actual`: `expected :> actual`.
pub fn sup<T: ToTrace<'tcx>>(
&self,
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index 457d5420c..2a586f810 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -986,6 +986,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}
+ self.explain_hrtb_projection(&mut err, trait_predicate, obligation.param_env, &obligation.cause);
+ self.suggest_desugaring_async_fn_in_trait(&mut err, trait_ref);
+
// Return early if the trait is Debug or Display and the invocation
// originates within a standard library macro, because the output
// is otherwise overwhelming and unhelpful (see #85844 for an
@@ -1844,7 +1847,6 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ty::Generator(..) => Some(18),
ty::Foreign(..) => Some(19),
ty::GeneratorWitness(..) => Some(20),
- ty::GeneratorWitnessMIR(..) => Some(21),
ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => None,
}
}
@@ -2054,7 +2056,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
tcx: self.tcx,
ty_op: |ty| ty,
lt_op: |lt| lt,
- ct_op: |ct| ct.eval(self.tcx, ty::ParamEnv::empty()),
+ ct_op: |ct| ct.normalize(self.tcx, ty::ParamEnv::empty()),
});
cand
})
@@ -2920,6 +2922,16 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
rustc_transmute::Reason::DstIsTooBig => {
format!("The size of `{src}` is smaller than the size of `{dst}`")
}
+ rustc_transmute::Reason::SrcSizeOverflow => {
+ format!(
+ "values of the type `{src}` are too big for the current architecture"
+ )
+ }
+ rustc_transmute::Reason::DstSizeOverflow => {
+ format!(
+ "values of the type `{dst}` are too big for the current architecture"
+ )
+ }
rustc_transmute::Reason::DstHasStricterAlignment {
src_min_align,
dst_min_align,
@@ -2999,10 +3011,10 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Try to report a help message
if is_fn_trait
&& let Ok((implemented_kind, params)) = self.type_implements_fn_trait(
- obligation.param_env,
- trait_ref.self_ty(),
- trait_predicate.skip_binder().polarity,
- )
+ obligation.param_env,
+ trait_ref.self_ty(),
+ trait_predicate.skip_binder().polarity,
+ )
{
self.add_help_message_for_fn_trait(trait_ref, err, implemented_kind, params);
} else if !trait_ref.has_non_region_infer()
@@ -3021,6 +3033,15 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
None,
obligation.cause.body_id,
);
+ } else if trait_ref.def_id().is_local()
+ && self.tcx.trait_impls_of(trait_ref.def_id()).is_empty()
+ && !self.tcx.trait_is_auto(trait_ref.def_id())
+ && !self.tcx.trait_is_alias(trait_ref.def_id())
+ {
+ err.span_help(
+ self.tcx.def_span(trait_ref.def_id()),
+ crate::fluent_generated::trait_selection_trait_has_no_impls,
+ );
} else if !suggested && !unsatisfied_const {
// Can't show anything else useful, try to find similar impls.
let impl_candidates = self.find_similar_impl_candidates(*trait_predicate);
@@ -3031,7 +3052,12 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
err,
true,
) {
- self.report_similar_impl_candidates_for_root_obligation(&obligation, *trait_predicate, body_def_id, err);
+ self.report_similar_impl_candidates_for_root_obligation(
+ &obligation,
+ *trait_predicate,
+ body_def_id,
+ err,
+ );
}
self.suggest_convert_to_slice(
@@ -3185,7 +3211,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let name = match self.tcx.opaque_type_origin(def_id.expect_local()) {
hir::OpaqueTyOrigin::FnReturn(_) | hir::OpaqueTyOrigin::AsyncFn(_) => {
- format!("opaque type")
+ "opaque type".to_string()
}
hir::OpaqueTyOrigin::TyAlias { .. } => {
format!("`{}`", self.tcx.def_path_debug_str(def_id))
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
index 0e73bad19..d645dc033 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -9,6 +9,7 @@ use rustc_hir::def_id::DefId;
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
use rustc_parse_format::{ParseMode, Parser, Piece, Position};
+use rustc_session::lint::builtin::UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::{Span, DUMMY_SP};
use std::iter;
@@ -103,7 +104,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, _, body_id), .. }) => {
self.describe_generator(*body_id).or_else(|| {
Some(match sig.header {
- hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async function",
+ hir::FnHeader { asyncness: hir::IsAsync::Async(_), .. } => {
+ "an async function"
+ }
_ => "a function",
})
})
@@ -117,7 +120,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
..
}) => self.describe_generator(*body_id).or_else(|| {
Some(match sig.header {
- hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async method",
+ hir::FnHeader { asyncness: hir::IsAsync::Async(_), .. } => "an async method",
_ => "a method",
})
}),
@@ -257,7 +260,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// Arrays give us `[]`, `[{ty}; _]` and `[{ty}; N]`
if let ty::Array(aty, len) = self_ty.kind() {
flags.push((sym::_Self, Some("[]".to_string())));
- let len = len.try_to_value().and_then(|v| v.try_to_target_usize(self.tcx));
+ let len = len.try_to_valtree().and_then(|v| v.try_to_target_usize(self.tcx));
flags.push((sym::_Self, Some(format!("[{aty}; _]"))));
if let Some(n) = len {
flags.push((sym::_Self, Some(format!("[{aty}; {n}]"))));
@@ -336,6 +339,10 @@ pub enum AppendConstMessage {
Custom(Symbol),
}
+#[derive(LintDiagnostic)]
+#[diag(trait_selection_malformed_on_unimplemented_attr)]
+pub struct NoValueInOnUnimplementedLint;
+
impl<'tcx> OnUnimplementedDirective {
fn parse(
tcx: TyCtxt<'tcx>,
@@ -343,7 +350,8 @@ impl<'tcx> OnUnimplementedDirective {
items: &[NestedMetaItem],
span: Span,
is_root: bool,
- ) -> Result<Self, ErrorGuaranteed> {
+ is_diagnostic_namespace_variant: bool,
+ ) -> Result<Option<Self>, ErrorGuaranteed> {
let mut errored = None;
let mut item_iter = items.iter();
@@ -391,7 +399,10 @@ impl<'tcx> OnUnimplementedDirective {
note = parse_value(note_)?;
continue;
}
- } else if item.has_name(sym::parent_label) && parent_label.is_none() {
+ } else if item.has_name(sym::parent_label)
+ && parent_label.is_none()
+ && !is_diagnostic_namespace_variant
+ {
if let Some(parent_label_) = item.value_str() {
parent_label = parse_value(parent_label_)?;
continue;
@@ -401,15 +412,30 @@ impl<'tcx> OnUnimplementedDirective {
&& message.is_none()
&& label.is_none()
&& note.is_none()
+ && !is_diagnostic_namespace_variant
+ // FIXME(diagnostic_namespace): disallow filters for now
{
if let Some(items) = item.meta_item_list() {
- match Self::parse(tcx, item_def_id, &items, item.span(), false) {
- Ok(subcommand) => subcommands.push(subcommand),
+ match Self::parse(
+ tcx,
+ item_def_id,
+ &items,
+ item.span(),
+ false,
+ is_diagnostic_namespace_variant,
+ ) {
+ Ok(Some(subcommand)) => subcommands.push(subcommand),
+ Ok(None) => bug!(
+ "This cannot happen for now as we only reach that if `is_diagnostic_namespace_variant` is false"
+ ),
Err(reported) => errored = Some(reported),
};
continue;
}
- } else if item.has_name(sym::append_const_msg) && append_const_msg.is_none() {
+ } else if item.has_name(sym::append_const_msg)
+ && append_const_msg.is_none()
+ && !is_diagnostic_namespace_variant
+ {
if let Some(msg) = item.value_str() {
append_const_msg = Some(AppendConstMessage::Custom(msg));
continue;
@@ -419,14 +445,23 @@ impl<'tcx> OnUnimplementedDirective {
}
}
- // nothing found
- tcx.sess.emit_err(NoValueInOnUnimplemented { span: item.span() });
+ if is_diagnostic_namespace_variant {
+ tcx.emit_spanned_lint(
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
+ tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local()),
+ vec![item.span()],
+ NoValueInOnUnimplementedLint,
+ );
+ } else {
+ // nothing found
+ tcx.sess.emit_err(NoValueInOnUnimplemented { span: item.span() });
+ }
}
if let Some(reported) = errored {
- Err(reported)
+ if is_diagnostic_namespace_variant { Ok(None) } else { Err(reported) }
} else {
- Ok(OnUnimplementedDirective {
+ Ok(Some(OnUnimplementedDirective {
condition,
subcommands,
message,
@@ -434,32 +469,58 @@ impl<'tcx> OnUnimplementedDirective {
note,
parent_label,
append_const_msg,
- })
+ }))
}
}
pub fn of_item(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> Result<Option<Self>, ErrorGuaranteed> {
- let Some(attr) = tcx.get_attr(item_def_id, sym::rustc_on_unimplemented) else {
+ let mut is_diagnostic_namespace_variant = false;
+ let Some(attr) = tcx.get_attr(item_def_id, sym::rustc_on_unimplemented).or_else(|| {
+ if tcx.features().diagnostic_namespace {
+ is_diagnostic_namespace_variant = true;
+ tcx.get_attrs_by_path(item_def_id, &[sym::diagnostic, sym::on_unimplemented]).next()
+ } else {
+ None
+ }
+ }) else {
return Ok(None);
};
let result = if let Some(items) = attr.meta_item_list() {
- Self::parse(tcx, item_def_id, &items, attr.span, true).map(Some)
+ Self::parse(tcx, item_def_id, &items, attr.span, true, is_diagnostic_namespace_variant)
} else if let Some(value) = attr.value_str() {
- Ok(Some(OnUnimplementedDirective {
- condition: None,
- message: None,
- subcommands: vec![],
- label: Some(OnUnimplementedFormatString::try_parse(
- tcx,
- item_def_id,
- value,
+ if !is_diagnostic_namespace_variant {
+ Ok(Some(OnUnimplementedDirective {
+ condition: None,
+ message: None,
+ subcommands: vec![],
+ label: Some(OnUnimplementedFormatString::try_parse(
+ tcx,
+ item_def_id,
+ value,
+ attr.span,
+ )?),
+ note: None,
+ parent_label: None,
+ append_const_msg: None,
+ }))
+ } else {
+ tcx.emit_spanned_lint(
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
+ tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local()),
attr.span,
- )?),
- note: None,
- parent_label: None,
- append_const_msg: None,
- }))
+ NoValueInOnUnimplementedLint,
+ );
+ Ok(None)
+ }
+ } else if is_diagnostic_namespace_variant {
+ tcx.emit_spanned_lint(
+ UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES,
+ tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local()),
+ attr.span,
+ NoValueInOnUnimplementedLint,
+ );
+ Ok(None)
} else {
let reported =
tcx.sess.delay_span_bug(DUMMY_SP, "of_item: neither meta_item_list nor value_str");
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
index 611ec6b00..15f2ba809 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -30,10 +30,9 @@ use rustc_infer::infer::{DefineOpaqueTypes, InferOk, LateBoundRegionConversionTi
use rustc_middle::hir::map;
use rustc_middle::ty::error::TypeError::{self, Sorts};
use rustc_middle::ty::{
- self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind,
- GeneratorDiagnosticData, GeneratorInteriorTypeCause, GenericArgs, InferTy, IsSuggestable,
- ToPredicate, Ty, TyCtxt, TypeAndMut, TypeFoldable, TypeFolder, TypeSuperFoldable,
- TypeVisitableExt, TypeckResults,
+ self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind, GenericArgs,
+ InferTy, IsSuggestable, ToPredicate, Ty, TyCtxt, TypeAndMut, TypeFoldable, TypeFolder,
+ TypeSuperFoldable, TypeVisitableExt, TypeckResults,
};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::{sym, Ident, Symbol};
@@ -50,7 +49,7 @@ use rustc_middle::ty::print::{with_forced_trimmed_paths, with_no_trimmed_paths};
#[derive(Debug)]
pub enum GeneratorInteriorOrUpvar {
// span of interior type
- Interior(Span, Option<(Option<Span>, Span, Option<hir::HirId>, Option<Span>)>),
+ Interior(Span, Option<(Span, Option<Span>)>),
// span of upvar
Upvar(Span),
}
@@ -58,15 +57,12 @@ pub enum GeneratorInteriorOrUpvar {
// This type provides a uniform interface to retrieve data on generators, whether it originated from
// the local crate being compiled or from a foreign crate.
#[derive(Debug)]
-pub enum GeneratorData<'tcx, 'a> {
- Local(&'a TypeckResults<'tcx>),
- Foreign(&'tcx GeneratorDiagnosticData<'tcx>),
-}
+struct GeneratorData<'tcx, 'a>(&'a TypeckResults<'tcx>);
impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
- // Try to get information about variables captured by the generator that matches a type we are
- // looking for with `ty_matches` function. We uses it to find upvar which causes a failure to
- // meet an obligation
+ /// Try to get information about variables captured by the generator that match a type we are
+ /// looking for with the `ty_matches` function. We use it to find the upvar which causes a failure
+ /// to meet an obligation.
fn try_get_upvar_span<F>(
&self,
infer_context: &InferCtxt<'tcx>,
@@ -76,27 +72,21 @@ impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
where
F: Fn(ty::Binder<'tcx, Ty<'tcx>>) -> bool,
{
- match self {
- GeneratorData::Local(typeck_results) => {
- infer_context.tcx.upvars_mentioned(generator_did).and_then(|upvars| {
- upvars.iter().find_map(|(upvar_id, upvar)| {
- let upvar_ty = typeck_results.node_type(*upvar_id);
- let upvar_ty = infer_context.resolve_vars_if_possible(upvar_ty);
- ty_matches(ty::Binder::dummy(upvar_ty))
- .then(|| GeneratorInteriorOrUpvar::Upvar(upvar.span))
- })
- })
- }
- GeneratorData::Foreign(_) => None,
- }
+ infer_context.tcx.upvars_mentioned(generator_did).and_then(|upvars| {
+ upvars.iter().find_map(|(upvar_id, upvar)| {
+ let upvar_ty = self.0.node_type(*upvar_id);
+ let upvar_ty = infer_context.resolve_vars_if_possible(upvar_ty);
+ ty_matches(ty::Binder::dummy(upvar_ty))
+ .then(|| GeneratorInteriorOrUpvar::Upvar(upvar.span))
+ })
+ })
}
- // Try to get the span of a type being awaited on that matches the type we are looking with the
- // `ty_matches` function. We uses it to find awaited type which causes a failure to meet an
- // obligation
+ /// Try to get the span of a type being awaited on that matches the type we are looking for with
+ /// the `ty_matches` function. We use it to find the awaited type which causes a failure to meet
+ /// an obligation.
fn get_from_await_ty<F>(
&self,
- tcx: TyCtxt<'tcx>,
visitor: AwaitsVisitor,
hir: map::Map<'tcx>,
ty_matches: F,
@@ -104,69 +94,12 @@ impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
where
F: Fn(ty::Binder<'tcx, Ty<'tcx>>) -> bool,
{
- match self {
- GeneratorData::Local(typeck_results) => visitor
- .awaits
- .into_iter()
- .map(|id| hir.expect_expr(id))
- .find(|await_expr| {
- ty_matches(ty::Binder::dummy(typeck_results.expr_ty_adjusted(&await_expr)))
- })
- .map(|expr| expr.span),
- GeneratorData::Foreign(generator_diagnostic_data) => visitor
- .awaits
- .into_iter()
- .map(|id| hir.expect_expr(id))
- .find(|await_expr| {
- ty_matches(ty::Binder::dummy(
- generator_diagnostic_data
- .adjustments
- .get(&await_expr.hir_id.local_id)
- .map_or::<&[ty::adjustment::Adjustment<'tcx>], _>(&[], |a| &a[..])
- .last()
- .map_or_else::<Ty<'tcx>, _, _>(
- || {
- generator_diagnostic_data
- .nodes_types
- .get(&await_expr.hir_id.local_id)
- .cloned()
- .unwrap_or_else(|| {
- bug!(
- "node_type: no type for node {}",
- tcx.hir().node_to_string(await_expr.hir_id)
- )
- })
- },
- |adj| adj.target,
- ),
- ))
- })
- .map(|expr| expr.span),
- }
- }
-
- /// Get the type, expression, span and optional scope span of all types
- /// that are live across the yield of this generator
- fn get_generator_interior_types(
- &self,
- ) -> ty::Binder<'tcx, &[GeneratorInteriorTypeCause<'tcx>]> {
- match self {
- GeneratorData::Local(typeck_result) => {
- typeck_result.generator_interior_types.as_deref()
- }
- GeneratorData::Foreign(generator_diagnostic_data) => {
- generator_diagnostic_data.generator_interior_types.as_deref()
- }
- }
- }
-
- // Used to get the source of the data, note we don't have as much information for generators
- // originated from foreign crates
- fn is_foreign(&self) -> bool {
- match self {
- GeneratorData::Local(_) => false,
- GeneratorData::Foreign(_) => true,
- }
+ visitor
+ .awaits
+ .into_iter()
+ .map(|id| hir.expect_expr(id))
+ .find(|await_expr| ty_matches(ty::Binder::dummy(self.0.expr_ty_adjusted(&await_expr))))
+ .map(|expr| expr.span)
}
}
@@ -316,7 +249,6 @@ pub trait TypeErrCtxtExt<'tcx> {
outer_generator: Option<DefId>,
trait_pred: ty::TraitPredicate<'tcx>,
target_ty: Ty<'tcx>,
- typeck_results: Option<&ty::TypeckResults<'tcx>>,
obligation: &PredicateObligation<'tcx>,
next_code: Option<&ObligationCauseCode<'tcx>>,
);
@@ -406,6 +338,20 @@ pub trait TypeErrCtxtExt<'tcx> {
candidate_impls: &[ImplCandidate<'tcx>],
span: Span,
);
+
+ fn explain_hrtb_projection(
+ &self,
+ diag: &mut Diagnostic,
+ pred: ty::PolyTraitPredicate<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ );
+
+ fn suggest_desugaring_async_fn_in_trait(
+ &self,
+ err: &mut Diagnostic,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ );
}
fn predicate_constraint(generics: &hir::Generics<'_>, pred: ty::Predicate<'_>) -> (Span, String) {
@@ -838,7 +784,20 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
obligation.param_env,
real_trait_pred_and_base_ty,
);
- if self.predicate_may_hold(&obligation) {
+ let sized_obligation = Obligation::new(
+ self.tcx,
+ obligation.cause.clone(),
+ obligation.param_env,
+ ty::TraitRef::from_lang_item(
+ self.tcx,
+ hir::LangItem::Sized,
+ obligation.cause.span,
+ [base_ty],
+ ),
+ );
+ if self.predicate_may_hold(&obligation)
+ && self.predicate_must_hold_modulo_regions(&sized_obligation)
+ {
let call_node = self.tcx.hir().get(*call_hir_id);
let msg = "consider dereferencing here";
let is_receiver = matches!(
@@ -1792,7 +1751,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
);
} else {
err.note(format!(
- "`{}` is implemented for `{:?}`, but not for `{:?}`",
+ "`{}` is implemented for `{}`, but not for `{}`",
trait_pred.print_modifiers_and_trait_path(),
suggested_ty,
trait_pred.skip_binder().self_ty(),
@@ -2213,11 +2172,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
);
match *ty.kind() {
- ty::Generator(did, ..) | ty::GeneratorWitnessMIR(did, _) => {
+ ty::Generator(did, ..) | ty::GeneratorWitness(did, _) => {
generator = generator.or(Some(did));
outer_generator = Some(did);
}
- ty::GeneratorWitness(..) => {}
ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
// By introducing a tuple of upvar types into the chain of obligations
// of a generator, the first non-generator item is now the tuple itself,
@@ -2243,11 +2201,10 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
);
match *ty.kind() {
- ty::Generator(did, ..) | ty::GeneratorWitnessMIR(did, ..) => {
+ ty::Generator(did, ..) | ty::GeneratorWitness(did, ..) => {
generator = generator.or(Some(did));
outer_generator = Some(did);
}
- ty::GeneratorWitness(..) => {}
ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
// By introducing a tuple of upvar types into the chain of obligations
// of a generator, the first non-generator item is now the tuple itself,
@@ -2324,12 +2281,9 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
// cycles. If we can't use resolved types because the generator comes from another crate,
// we still provide a targeted error but without all the relevant spans.
let generator_data = match &self.typeck_results {
- Some(t) if t.hir_owner.to_def_id() == generator_did_root => GeneratorData::Local(&t),
+ Some(t) if t.hir_owner.to_def_id() == generator_did_root => GeneratorData(&t),
_ if generator_did.is_local() => {
- GeneratorData::Local(self.tcx.typeck(generator_did.expect_local()))
- }
- _ if let Some(generator_diag_data) = self.tcx.generator_diagnostic_data(generator_did) => {
- GeneratorData::Foreign(generator_diag_data)
+ GeneratorData(self.tcx.typeck(generator_did.expect_local()))
}
_ => return false,
};
@@ -2341,30 +2295,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
let mut interior_or_upvar_span = None;
- let from_awaited_ty = generator_data.get_from_await_ty(self.tcx, visitor, hir, ty_matches);
+ let from_awaited_ty = generator_data.get_from_await_ty(visitor, hir, ty_matches);
debug!(?from_awaited_ty);
- // The generator interior types share the same binders
- if let Some(cause) =
- generator_data.get_generator_interior_types().skip_binder().iter().find(
- |ty::GeneratorInteriorTypeCause { ty, .. }| {
- ty_matches(generator_data.get_generator_interior_types().rebind(*ty))
- },
- )
- {
- let ty::GeneratorInteriorTypeCause { span, scope_span, yield_span, expr, .. } = cause;
-
- interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(
- *span,
- Some((*scope_span, *yield_span, *expr, from_awaited_ty)),
- ));
-
- if interior_or_upvar_span.is_none() && generator_data.is_foreign() {
- interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(*span, None));
- }
- } else if self.tcx.sess.opts.unstable_opts.drop_tracking_mir
- // Avoid disclosing internal information to downstream crates.
- && generator_did.is_local()
+ // Avoid disclosing internal information to downstream crates.
+ if generator_did.is_local()
// Try to avoid cycles.
&& !generator_within_in_progress_typeck
&& let Some(generator_info) = self.tcx.mir_generator_witnesses(generator_did)
@@ -2380,7 +2315,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
if ty_matches(ty::Binder::dummy(decl.ty)) && !decl.ignore_for_traits {
interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(
decl.source_info.span,
- Some((None, source_info.span, None, from_awaited_ty)),
+ Some((source_info.span, from_awaited_ty)),
));
break 'find_source;
}
@@ -2393,17 +2328,13 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
generator_data.try_get_upvar_span(&self, generator_did, ty_matches);
}
- if interior_or_upvar_span.is_none() && generator_data.is_foreign() {
+ if interior_or_upvar_span.is_none() && !generator_did.is_local() {
interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(span, None));
}
debug!(?interior_or_upvar_span);
if let Some(interior_or_upvar_span) = interior_or_upvar_span {
let is_async = self.tcx.generator_is_async(generator_did);
- let typeck_results = match generator_data {
- GeneratorData::Local(typeck_results) => Some(typeck_results),
- GeneratorData::Foreign(_) => None,
- };
self.note_obligation_cause_for_async_await(
err,
interior_or_upvar_span,
@@ -2411,7 +2342,6 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
outer_generator,
trait_ref,
target_ty,
- typeck_results,
obligation,
next_code,
);
@@ -2432,7 +2362,6 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
outer_generator: Option<DefId>,
trait_pred: ty::TraitPredicate<'tcx>,
target_ty: Ty<'tcx>,
- typeck_results: Option<&ty::TypeckResults<'tcx>>,
obligation: &PredicateObligation<'tcx>,
next_code: Option<&ObligationCauseCode<'tcx>>,
) {
@@ -2490,9 +2419,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
format!("does not implement `{}`", trait_pred.print_modifiers_and_trait_path())
};
- let mut explain_yield = |interior_span: Span,
- yield_span: Span,
- scope_span: Option<Span>| {
+ let mut explain_yield = |interior_span: Span, yield_span: Span| {
let mut span = MultiSpan::from_span(yield_span);
let snippet = match source_map.span_to_snippet(interior_span) {
// #70935: If snippet contains newlines, display "the value" instead
@@ -2524,22 +2451,14 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
interior_span,
format!("has type `{target_ty}` which {trait_explanation}"),
);
- if let Some(scope_span) = scope_span {
- let scope_span = source_map.end_point(scope_span);
-
- let msg = format!("{snippet} is later dropped here");
- span.push_span_label(scope_span, msg);
- }
err.span_note(
- span,
- format!(
- "{future_or_generator} {trait_explanation} as this value is used across {an_await_or_yield}"
- ),
- );
+ span,
+ format!("{future_or_generator} {trait_explanation} as this value is used across {an_await_or_yield}"),
+ );
};
match interior_or_upvar_span {
GeneratorInteriorOrUpvar::Interior(interior_span, interior_extra_info) => {
- if let Some((scope_span, yield_span, expr, from_awaited_ty)) = interior_extra_info {
+ if let Some((yield_span, from_awaited_ty)) = interior_extra_info {
if let Some(await_span) = from_awaited_ty {
// The type causing this obligation is one being awaited at await_span.
let mut span = MultiSpan::from_span(await_span);
@@ -2557,62 +2476,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
);
} else {
// Look at the last interior type to get a span for the `.await`.
- debug!(
- generator_interior_types = ?format_args!(
- "{:#?}", typeck_results.as_ref().map(|t| &t.generator_interior_types)
- ),
- );
- explain_yield(interior_span, yield_span, scope_span);
- }
-
- if let Some(expr_id) = expr {
- let expr = hir.expect_expr(expr_id);
- debug!("target_ty evaluated from {:?}", expr);
-
- let parent = hir.parent_id(expr_id);
- if let Some(hir::Node::Expr(e)) = hir.find(parent) {
- let parent_span = hir.span(parent);
- let parent_did = parent.owner.to_def_id();
- // ```rust
- // impl T {
- // fn foo(&self) -> i32 {}
- // }
- // T.foo();
- // ^^^^^^^ a temporary `&T` created inside this method call due to `&self`
- // ```
- //
- let is_region_borrow = if let Some(typeck_results) = typeck_results {
- typeck_results
- .expr_adjustments(expr)
- .iter()
- .any(|adj| adj.is_region_borrow())
- } else {
- false
- };
-
- // ```rust
- // struct Foo(*const u8);
- // bar(Foo(std::ptr::null())).await;
- // ^^^^^^^^^^^^^^^^^^^^^ raw-ptr `*T` created inside this struct ctor.
- // ```
- debug!(parent_def_kind = ?self.tcx.def_kind(parent_did));
- let is_raw_borrow_inside_fn_like_call =
- match self.tcx.def_kind(parent_did) {
- DefKind::Fn | DefKind::Ctor(..) => target_ty.is_unsafe_ptr(),
- _ => false,
- };
- if let Some(typeck_results) = typeck_results {
- if (typeck_results.is_method_call(e) && is_region_borrow)
- || is_raw_borrow_inside_fn_like_call
- {
- err.span_help(
- parent_span,
- "consider moving this into a `let` \
- binding to create a shorter lived borrow",
- );
- }
- }
- }
+ explain_yield(interior_span, yield_span);
}
}
}
@@ -2690,6 +2554,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
| ObligationCauseCode::IfExpressionWithNoElse
| ObligationCauseCode::MainFunctionType
| ObligationCauseCode::StartFunctionType
+ | ObligationCauseCode::LangFunctionType(_)
| ObligationCauseCode::IntrinsicType
| ObligationCauseCode::MethodReceiver
| ObligationCauseCode::ReturnNoExpression
@@ -2979,6 +2844,24 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
ObligationCauseCode::InlineAsmSized => {
err.note("all inline asm arguments must have a statically known size");
}
+ ObligationCauseCode::SizedClosureCapture(closure_def_id) => {
+ err.note("all values captured by value by a closure must have a statically known size");
+ let hir::ExprKind::Closure(closure) = self.tcx.hir().get_by_def_id(closure_def_id).expect_expr().kind else {
+ bug!("expected closure in SizedClosureCapture obligation");
+ };
+ if let hir::CaptureBy::Value = closure.capture_clause
+ && let Some(span) = closure.fn_arg_span
+ {
+ err.span_label(span, "this closure captures all values by move");
+ }
+ }
+ ObligationCauseCode::SizedGeneratorInterior(generator_def_id) => {
+ let what = match self.tcx.generator_kind(generator_def_id) {
+ None | Some(hir::GeneratorKind::Gen) => "yield",
+ Some(hir::GeneratorKind::Async(..)) => "await",
+ };
+ err.note(format!("all values live across `{what}` must have a statically known size"));
+ }
ObligationCauseCode::ConstPatternStructural => {
err.note("constants used for pattern-matching must derive `PartialEq` and `Eq`");
}
@@ -3044,20 +2927,7 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
err.span_note(self.tcx.def_span(def_id), msg)
}
- ty::GeneratorWitness(bound_tys) => {
- use std::fmt::Write;
-
- // FIXME: this is kind of an unusual format for rustc, can we make it more clear?
- // Maybe we should just remove this note altogether?
- // FIXME: only print types which don't meet the trait requirement
- let mut msg =
- "required because it captures the following types: ".to_owned();
- for ty in bound_tys.skip_binder() {
- with_forced_trimmed_paths!(write!(msg, "`{ty}`, ").unwrap());
- }
- err.note(msg.trim_end_matches(", ").to_string())
- }
- ty::GeneratorWitnessMIR(def_id, args) => {
+ ty::GeneratorWitness(def_id, args) => {
use std::fmt::Write;
// FIXME: this is kind of an unusual format for rustc, can we make it more clear?
@@ -4014,6 +3884,201 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}
}
+
+ fn explain_hrtb_projection(
+ &self,
+ diag: &mut Diagnostic,
+ pred: ty::PolyTraitPredicate<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ ) {
+ if pred.skip_binder().has_escaping_bound_vars() && pred.skip_binder().has_non_region_infer()
+ {
+ self.probe(|_| {
+ let ocx = ObligationCtxt::new(self);
+ let pred = self.instantiate_binder_with_placeholders(pred);
+ let pred = ocx.normalize(&ObligationCause::dummy(), param_env, pred);
+ ocx.register_obligation(Obligation::new(
+ self.tcx,
+ ObligationCause::dummy(),
+ param_env,
+ pred,
+ ));
+ if !ocx.select_where_possible().is_empty() {
+ // encountered errors.
+ return;
+ }
+
+ if let ObligationCauseCode::FunctionArgumentObligation {
+ call_hir_id,
+ arg_hir_id,
+ parent_code: _,
+ } = cause.code()
+ {
+ let arg_span = self.tcx.hir().span(*arg_hir_id);
+ let mut sp: MultiSpan = arg_span.into();
+
+ sp.push_span_label(
+ arg_span,
+ "the trait solver is unable to infer the \
+ generic types that should be inferred from this argument",
+ );
+ sp.push_span_label(
+ self.tcx.hir().span(*call_hir_id),
+ "add turbofish arguments to this call to \
+ specify the types manually, even if it's redundant",
+ );
+ diag.span_note(
+ sp,
+ "this is a known limitation of the trait solver that \
+ will be lifted in the future",
+ );
+ } else {
+ let mut sp: MultiSpan = cause.span.into();
+ sp.push_span_label(
+ cause.span,
+ "try adding turbofish arguments to this expression to \
+ specify the types manually, even if it's redundant",
+ );
+ diag.span_note(
+ sp,
+ "this is a known limitation of the trait solver that \
+ will be lifted in the future",
+ );
+ }
+ });
+ }
+ }
+
+ fn suggest_desugaring_async_fn_in_trait(
+ &self,
+ err: &mut Diagnostic,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ ) {
+ // Don't suggest if RTN is active -- we should prefer a where-clause bound instead.
+ if self.tcx.features().return_type_notation {
+ return;
+ }
+
+ let trait_def_id = trait_ref.def_id();
+
+ // Only suggest specifying auto traits
+ if !self.tcx.trait_is_auto(trait_def_id) {
+ return;
+ }
+
+ // Look for an RPITIT
+ let ty::Alias(ty::Projection, alias_ty) = trait_ref.self_ty().skip_binder().kind() else {
+ return;
+ };
+ let Some(ty::ImplTraitInTraitData::Trait { fn_def_id, opaque_def_id }) =
+ self.tcx.opt_rpitit_info(alias_ty.def_id)
+ else {
+ return;
+ };
+
+ let auto_trait = self.tcx.def_path_str(trait_def_id);
+ // ... which is a local function
+ let Some(fn_def_id) = fn_def_id.as_local() else {
+ // If it's not local, we can at least mention that the method is async, if it is.
+ if self.tcx.asyncness(fn_def_id).is_async() {
+ err.span_note(
+ self.tcx.def_span(fn_def_id),
+ format!(
+ "`{}::{}` is an `async fn` in trait, which does not \
+ automatically imply that its future is `{auto_trait}`",
+ alias_ty.trait_ref(self.tcx),
+ self.tcx.item_name(fn_def_id)
+ ),
+ );
+ }
+ return;
+ };
+ let Some(hir::Node::TraitItem(item)) = self.tcx.hir().find_by_def_id(fn_def_id) else {
+ return;
+ };
+
+ // ... whose signature is `async` (i.e. this is an AFIT)
+ let (sig, body) = item.expect_fn();
+ let hir::IsAsync::Async(async_span) = sig.header.asyncness else {
+ return;
+ };
+ let Ok(async_span) =
+ self.tcx.sess.source_map().span_extend_while(async_span, |c| c.is_whitespace())
+ else {
+ return;
+ };
+ let hir::FnRetTy::Return(hir::Ty { kind: hir::TyKind::OpaqueDef(def, ..), .. }) =
+ sig.decl.output
+ else {
+ // This should never happen, but let's not ICE.
+ return;
+ };
+
+ // Check that this is *not* a nested `impl Future` RPIT in an async fn
+ // (i.e. `async fn foo() -> impl Future`)
+ if def.owner_id.to_def_id() != opaque_def_id {
+ return;
+ }
+
+ let future = self.tcx.hir().item(*def).expect_opaque_ty();
+ let Some(hir::GenericBound::LangItemTrait(_, _, _, generics)) = future.bounds.get(0) else {
+ // `async fn` should always lower to a lang item bound... but don't ICE.
+ return;
+ };
+ let Some(hir::TypeBindingKind::Equality { term: hir::Term::Ty(future_output_ty) }) =
+ generics.bindings.get(0).map(|binding| binding.kind)
+ else {
+ // Also should never happen.
+ return;
+ };
+
+ let function_name = self.tcx.def_path_str(fn_def_id);
+
+ let mut sugg = if future_output_ty.span.is_empty() {
+ vec![
+ (async_span, String::new()),
+ (
+ future_output_ty.span,
+ format!(" -> impl std::future::Future<Output = ()> + {auto_trait}"),
+ ),
+ ]
+ } else {
+ vec![
+ (
+ future_output_ty.span.shrink_to_lo(),
+ "impl std::future::Future<Output = ".to_owned(),
+ ),
+ (future_output_ty.span.shrink_to_hi(), format!("> + {auto_trait}")),
+ (async_span, String::new()),
+ ]
+ };
+
+ // If there's a body, we also need to wrap it in `async {}`
+ if let hir::TraitFn::Provided(body) = body {
+ let body = self.tcx.hir().body(*body);
+ let body_span = body.value.span;
+ let body_span_without_braces =
+ body_span.with_lo(body_span.lo() + BytePos(1)).with_hi(body_span.hi() - BytePos(1));
+ if body_span_without_braces.is_empty() {
+ sugg.push((body_span_without_braces, " async {} ".to_owned()));
+ } else {
+ sugg.extend([
+ (body_span_without_braces.shrink_to_lo(), "async {".to_owned()),
+ (body_span_without_braces.shrink_to_hi(), "} ".to_owned()),
+ ]);
+ }
+ }
+
+ err.multipart_suggestion(
+ format!(
+ "`{auto_trait}` can be made part of the associated future's \
+ guarantees for all implementations of `{function_name}`"
+ ),
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
}
/// Add a hint to add a missing borrow or remove an unnecessary one.
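The new `suggest_desugaring_async_fn_in_trait` helper above rewrites an `async fn` in a trait into its return-position `impl Trait` form so that an auto trait such as `Send` becomes part of the method's contract. A minimal sketch of the before/after shape (hypothetical `Fetch`/`fetch` names; the desugared form relies on return-position `impl Trait` in traits, which was still feature-gated alongside `async fn` in traits at this point):

```rust
use std::future::Future;

trait Fetch {
    // Original form: `async fn fetch(&self) -> String;`
    // Callers cannot assume the returned future is `Send`.
    //
    // Desugared form the suggestion produces, making `Send` a guarantee for
    // every implementation of `fetch`:
    fn fetch(&self) -> impl Future<Output = String> + Send;
}
```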
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
index 3ebf1246a..55b5604b1 100644
--- a/compiler/rustc_trait_selection/src/traits/fulfill.rs
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -559,30 +559,31 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
let stalled_on = &mut pending_obligation.stalled_on;
- let mut evaluate =
- |c: Const<'tcx>| {
- if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
- match self.selcx.infcx.try_const_eval_resolve(
- obligation.param_env,
- unevaluated,
- c.ty(),
- Some(obligation.cause.span),
- ) {
- Ok(val) => Ok(val),
- Err(e) => match e {
- ErrorHandled::TooGeneric => {
+ let mut evaluate = |c: Const<'tcx>| {
+ if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
+ match self.selcx.infcx.try_const_eval_resolve(
+ obligation.param_env,
+ unevaluated,
+ c.ty(),
+ Some(obligation.cause.span),
+ ) {
+ Ok(val) => Ok(val),
+ Err(e) => {
+ match e {
+ ErrorHandled::TooGeneric(..) => {
stalled_on.extend(unevaluated.args.iter().filter_map(
TyOrConstInferVar::maybe_from_generic_arg,
));
- Err(ErrorHandled::TooGeneric)
}
- _ => Err(e),
- },
+ _ => {}
+ }
+ Err(e)
}
- } else {
- Ok(c)
}
- };
+ } else {
+ Ok(c)
+ }
+ };
match (evaluate(c1), evaluate(c2)) {
(Ok(c1), Ok(c2)) => {
@@ -602,13 +603,14 @@ impl<'a, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'tcx> {
),
}
}
- (Err(ErrorHandled::Reported(reported)), _)
- | (_, Err(ErrorHandled::Reported(reported))) => ProcessResult::Error(
+ (Err(ErrorHandled::Reported(reported, _)), _)
+ | (_, Err(ErrorHandled::Reported(reported, _))) => ProcessResult::Error(
CodeSelectionError(SelectionError::NotConstEvaluatable(
NotConstEvaluatable::Error(reported.into()),
)),
),
- (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
+ (Err(ErrorHandled::TooGeneric(_)), _)
+ | (_, Err(ErrorHandled::TooGeneric(_))) => {
if c1.has_non_region_infer() || c2.has_non_region_infer() {
ProcessResult::Unchanged
} else {
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
index d2210c6d5..956f8e047 100644
--- a/compiler/rustc_trait_selection/src/traits/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -288,7 +288,7 @@ pub fn normalize_param_env_or_error<'tcx>(
// should actually be okay since without `feature(generic_const_exprs)` the only
// const arguments that have a non-empty param env are array repeat counts. These
// do not appear in the type system though.
- c.eval(self.0, ty::ParamEnv::empty())
+ c.normalize(self.0, ty::ParamEnv::empty())
}
}
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
index 06a1027e5..f7b4794db 100644
--- a/compiler/rustc_trait_selection/src/traits/project.rs
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -659,6 +659,18 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
normalized_ty
}
ty::Weak => {
+ let recursion_limit = self.interner().recursion_limit();
+ if !recursion_limit.value_within_limit(self.depth) {
+ self.selcx.infcx.err_ctxt().report_overflow_error(
+ &ty,
+ self.cause.span,
+ false,
+ |diag| {
+ diag.note(crate::fluent_generated::trait_selection_ty_alias_overflow);
+ },
+ );
+ }
+
let infcx = self.selcx.infcx;
self.obligations.extend(
infcx.tcx.predicates_of(data.def_id).instantiate_own(infcx.tcx, data.args).map(
@@ -678,7 +690,14 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
},
),
);
- infcx.tcx.type_of(data.def_id).instantiate(infcx.tcx, data.args).fold_with(self)
+ self.depth += 1;
+ let res = infcx
+ .tcx
+ .type_of(data.def_id)
+ .instantiate(infcx.tcx, data.args)
+ .fold_with(self);
+ self.depth -= 1;
+ res
}
ty::Inherent if !data.has_escaping_bound_vars() => {
@@ -742,7 +761,7 @@ impl<'a, 'b, 'tcx> TypeFolder<TyCtxt<'tcx>> for AssocTypeNormalizer<'a, 'b, 'tcx
self.selcx.infcx,
&mut self.universes,
constant,
- |constant| constant.eval(tcx, self.param_env),
+ |constant| constant.normalize(tcx, self.param_env),
)
}
}
@@ -1214,7 +1233,7 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>(
let projected_term = selcx.infcx.resolve_vars_if_possible(projected_term);
- let result = if projected_term.has_projections() {
+ let mut result = if projected_term.has_projections() {
let mut normalizer = AssocTypeNormalizer::new(
selcx,
param_env,
@@ -1224,14 +1243,14 @@ fn opt_normalize_projection_type<'a, 'b, 'tcx>(
);
let normalized_ty = normalizer.fold(projected_term);
- let mut deduped = SsoHashSet::with_capacity(projected_obligations.len());
- projected_obligations.retain(|obligation| deduped.insert(obligation.clone()));
-
Normalized { value: normalized_ty, obligations: projected_obligations }
} else {
Normalized { value: projected_term, obligations: projected_obligations }
};
+ let mut deduped = SsoHashSet::with_capacity(result.obligations.len());
+ result.obligations.retain(|obligation| deduped.insert(obligation.clone()));
+
if use_cache {
infcx.inner.borrow_mut().projection_cache().insert_term(cache_key, result.clone());
}
@@ -1625,7 +1644,7 @@ fn assemble_candidates_from_object_ty<'cx, 'tcx>(
let env_predicates = data
.projection_bounds()
.filter(|bound| bound.item_def_id() == obligation.predicate.def_id)
- .map(|p| p.with_self_ty(tcx, object_ty).to_predicate(tcx));
+ .map(|p| ty::Clause::from_projection_clause(tcx, p.with_self_ty(tcx, object_ty)));
assemble_candidates_from_predicates(
selcx,
@@ -1794,7 +1813,6 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Tuple(..)
// Integers and floats always have `u8` as their discriminant.
@@ -1844,7 +1862,6 @@ fn assemble_candidates_from_impls<'cx, 'tcx>(
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
// Extern types have unit metadata, according to RFC 2850
| ty::Foreign(_)
diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
index 9484a50e3..620b992ee 100644
--- a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
@@ -36,7 +36,6 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
| ty::FnPtr(_)
| ty::Char
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::RawPtr(_)
| ty::Ref(..)
| ty::Str
@@ -134,7 +133,7 @@ pub fn compute_dropck_outlives_inner<'tcx>(
result.overflows.len(),
ty_stack.len()
);
- dtorck_constraint_for_ty_inner(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?;
+ dtorck_constraint_for_ty_inner(tcx, param_env, DUMMY_SP, depth, ty, &mut constraints)?;
// "outlives" represent types/regions that may be touched
// by a destructor.
@@ -186,16 +185,15 @@ pub fn compute_dropck_outlives_inner<'tcx>(
/// Returns a set of constraints that needs to be satisfied in
/// order for `ty` to be valid for destruction.
+#[instrument(level = "debug", skip(tcx, param_env, span, constraints))]
pub fn dtorck_constraint_for_ty_inner<'tcx>(
tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
span: Span,
- for_ty: Ty<'tcx>,
depth: usize,
ty: Ty<'tcx>,
constraints: &mut DropckConstraint<'tcx>,
) -> Result<(), NoSolution> {
- debug!("dtorck_constraint_for_ty_inner({:?}, {:?}, {:?}, {:?})", span, for_ty, depth, ty);
-
if !tcx.recursion_limit().value_within_limit(depth) {
constraints.overflows.push(ty);
return Ok(());
@@ -218,21 +216,20 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(_)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..) => {
+ | ty::GeneratorWitness(..) => {
// these types never have a destructor
}
ty::Array(ety, _) | ty::Slice(ety) => {
// single-element containers, behave like their element
rustc_data_structures::stack::ensure_sufficient_stack(|| {
- dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, *ety, constraints)
+ dtorck_constraint_for_ty_inner(tcx, param_env, span, depth + 1, *ety, constraints)
})?;
}
ty::Tuple(tys) => rustc_data_structures::stack::ensure_sufficient_stack(|| {
for ty in tys.iter() {
- dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, ty, constraints)?;
+ dtorck_constraint_for_ty_inner(tcx, param_env, span, depth + 1, ty, constraints)?;
}
Ok::<_, NoSolution>(())
})?,
@@ -251,7 +248,14 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
rustc_data_structures::stack::ensure_sufficient_stack(|| {
for ty in args.as_closure().upvar_tys() {
- dtorck_constraint_for_ty_inner(tcx, span, for_ty, depth + 1, ty, constraints)?;
+ dtorck_constraint_for_ty_inner(
+ tcx,
+ param_env,
+ span,
+ depth + 1,
+ ty,
+ constraints,
+ )?;
}
Ok::<_, NoSolution>(())
})?
@@ -280,8 +284,8 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
// only take place through references with lifetimes
// derived from lifetimes attached to the upvars and resume
// argument, and we *do* incorporate those here.
-
- if !args.as_generator().is_valid() {
+ let args = args.as_generator();
+ if !args.is_valid() {
// By the time this code runs, all type variables ought to
// be fully resolved.
tcx.sess.delay_span_bug(
@@ -291,10 +295,13 @@ pub fn dtorck_constraint_for_ty_inner<'tcx>(
return Err(NoSolution);
}
- constraints
- .outlives
- .extend(args.as_generator().upvar_tys().iter().map(ty::GenericArg::from));
- constraints.outlives.push(args.as_generator().resume_ty().into());
+ // While we conservatively assume that all coroutines require drop
+ // to avoid query cycles during MIR building, we can check the actual
+ // witness during borrowck to avoid unnecessary liveness constraints.
+ if args.witness().needs_drop(tcx, tcx.erase_regions(param_env)) {
+ constraints.outlives.extend(args.upvar_tys().iter().map(ty::GenericArg::from));
+ constraints.outlives.push(args.resume_ty().into());
+ }
}
ty::Adt(def, args) => {
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
index 87beaddc6..f785211c5 100644
--- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -358,7 +358,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'cx, 'tcx>
self.infcx,
&mut self.universes,
constant,
- |constant| constant.eval(self.infcx.tcx, self.param_env),
+ |constant| constant.normalize(self.infcx.tcx, self.param_env),
))
}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
index 979498fb6..e415d7047 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
@@ -13,7 +13,7 @@ use rustc_span::def_id::CRATE_DEF_ID;
use rustc_span::source_map::DUMMY_SP;
use smallvec::{smallvec, SmallVec};
-#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct ImpliedOutlivesBounds<'tcx> {
pub ty: Ty<'tcx>,
}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
index 59f4a22ac..f2c1243f9 100644
--- a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
@@ -6,7 +6,7 @@ use crate::traits::ObligationCtxt;
use rustc_middle::traits::query::{DropckOutlivesResult, NoSolution};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
pub struct DropckOutlives<'tcx> {
dropped_ty: Ty<'tcx>,
}
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
index e3da87a22..bead8758a 100644
--- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -436,8 +436,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Ref(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(_, _)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Tuple(_)
| ty::Error(_) => return true,
@@ -569,8 +568,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Generator(..)
| ty::Never
| ty::Tuple(_)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..) => {
+ | ty::GeneratorWitness(..) => {
// Only consider auto impls if there are no manual impls for the root of `self_ty`.
//
// For example, we only consider auto candidates for `&i32: Auto` if no explicit impl
@@ -946,8 +944,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Closure(..)
| ty::Generator(..)
| ty::Tuple(_)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..) => {
+ | ty::GeneratorWitness(..) => {
// These are built-in, and cannot have a custom `impl const Destruct`.
candidates.vec.push(ConstDestructCandidate(None));
}
@@ -1020,8 +1017,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Dynamic(_, _, _)
| ty::Closure(_, _)
| ty::Generator(_, _, _)
- | ty::GeneratorWitness(_)
- | ty::GeneratorWitnessMIR(..)
+ | ty::GeneratorWitness(..)
| ty::Never
| ty::Alias(..)
| ty::Param(_)
@@ -1083,7 +1079,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Tuple(..)
| ty::Alias(..)
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 88d030033..08ee9c73b 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -535,6 +535,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let assoc_types: Vec<_> = tcx
.associated_items(trait_predicate.def_id())
.in_definition_order()
+ // Associated types that require `Self: Sized` do not show up in the built-in
+ // implementation of `Trait for dyn Trait`, and can be dropped here.
+ .filter(|item| !tcx.generics_require_sized_self(item.def_id))
.filter_map(
|item| if item.kind == ty::AssocKind::Type { Some(item.def_id) } else { None },
)
@@ -548,7 +551,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
obligation.cause.span,
"GATs in trait object shouldn't have been considered",
);
- return Err(SelectionError::Unimplemented);
+ return Err(SelectionError::TraitNotObjectSafe(trait_predicate.trait_ref.def_id));
}
// This maybe belongs in wf, but that can't (doesn't) handle
@@ -1235,10 +1238,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
let generator = args.as_generator();
stack.extend([generator.tupled_upvars_ty(), generator.witness()]);
}
- ty::GeneratorWitness(tys) => {
- stack.extend(tcx.erase_late_bound_regions(tys).to_vec());
- }
- ty::GeneratorWitnessMIR(def_id, args) => {
+ ty::GeneratorWitness(def_id, args) => {
let tcx = self.tcx();
stack.extend(tcx.generator_hidden_types(def_id).map(|bty| {
let ty = bty.instantiate(tcx, args);
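Earlier in this file's diff, object-candidate confirmation starts filtering out associated types that carry a `Self: Sized` bound, since they are not part of the built-in `Trait for dyn Trait` implementation. A sketch of the user-facing situation this corresponds to (hypothetical `Draw`/`Canvas` names, assuming the compiler accepts omitting such an associated type from the `dyn` type, as the comment in that hunk describes):

```rust
trait Draw {
    // Bounded by `Self: Sized`, so it does not appear in `dyn Draw`.
    type Canvas
    where
        Self: Sized;

    fn draw(&self);
}

fn render(shape: &dyn Draw) {
    // No `Canvas = ...` binding is needed on the trait object.
    shape.draw();
}
```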
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index 19385e2d7..ec46a6769 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -35,7 +35,8 @@ use rustc_hir::def_id::DefId;
use rustc_infer::infer::DefineOpaqueTypes;
use rustc_infer::infer::LateBoundRegionConversionTime;
use rustc_infer::traits::TraitObligation;
-use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
+use rustc_middle::dep_graph::dep_kinds;
+use rustc_middle::dep_graph::DepNodeIndex;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::abstract_const::NotConstEvaluatable;
use rustc_middle::ty::fold::BottomUpFolder;
@@ -988,9 +989,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
Err(_) => Ok(EvaluatedToErr),
}
}
- (Err(ErrorHandled::Reported(_)), _)
- | (_, Err(ErrorHandled::Reported(_))) => Ok(EvaluatedToErr),
- (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
+ (Err(ErrorHandled::Reported(..)), _)
+ | (_, Err(ErrorHandled::Reported(..))) => Ok(EvaluatedToErr),
+ (Err(ErrorHandled::TooGeneric(..)), _)
+ | (_, Err(ErrorHandled::TooGeneric(..))) => {
if c1.has_non_region_infer() || c2.has_non_region_infer() {
Ok(EvaluatedToAmbig)
} else {
@@ -1415,7 +1417,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
OP: FnOnce(&mut Self) -> R,
{
let (result, dep_node) =
- self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self));
+ self.tcx().dep_graph.with_anon_task(self.tcx(), dep_kinds::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}
@@ -2110,7 +2112,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
@@ -2209,22 +2210,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
}
}
- ty::GeneratorWitness(binder) => {
- let witness_tys = binder.skip_binder();
- for witness_ty in witness_tys.iter() {
- let resolved = self.infcx.shallow_resolve(witness_ty);
- if resolved.is_ty_var() {
- return Ambiguous;
- }
- }
- // (*) binder moved here
- let all_vars = self.tcx().mk_bound_variable_kinds_from_iter(
- obligation.predicate.bound_vars().iter().chain(binder.bound_vars().iter()),
- );
- Where(ty::Binder::bind_with_vars(witness_tys.to_vec(), all_vars))
- }
-
- ty::GeneratorWitnessMIR(def_id, ref args) => {
+ ty::GeneratorWitness(def_id, ref args) => {
let hidden_types = bind_generator_hidden_types_above(
self.infcx,
def_id,
@@ -2329,12 +2315,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
}
- ty::GeneratorWitness(types) => {
- debug_assert!(!types.has_escaping_bound_vars());
- types.map_bound(|types| types.to_vec())
- }
-
- ty::GeneratorWitnessMIR(def_id, ref args) => {
+ ty::GeneratorWitness(def_id, ref args) => {
bind_generator_hidden_types_above(self.infcx, def_id, args, t.bound_vars())
}
@@ -2346,14 +2327,15 @@ impl<'tcx> SelectionContext<'_, 'tcx> {
}
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
- let ty = self.tcx().type_of(def_id);
- if ty.skip_binder().references_error() {
- return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
- }
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
- t.rebind(vec![ty.instantiate(self.tcx(), args)])
+ match self.tcx().type_of_opaque(def_id) {
+ Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
+ Err(_) => {
+ return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
+ }
+ }
}
})
}
@@ -3093,32 +3075,33 @@ fn bind_generator_hidden_types_above<'tcx>(
.generator_hidden_types(def_id)
// Deduplicate tys to avoid repeated work.
.filter(|bty| seen_tys.insert(*bty))
- .map(|bty| {
- let mut ty = bty.instantiate(tcx, args);
-
+ .map(|mut bty| {
// Only remap erased regions if we use them.
if considering_regions {
- ty = tcx.fold_regions(ty, |r, current_depth| match r.kind() {
- ty::ReErased => {
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_u32(counter),
- kind: ty::BrAnon(None),
- };
- counter += 1;
- ty::Region::new_late_bound(tcx, current_depth, br)
- }
- r => bug!("unexpected region: {r:?}"),
+ bty = bty.map_bound(|ty| {
+ tcx.fold_regions(ty, |r, current_depth| match r.kind() {
+ ty::ReErased => {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon,
+ };
+ counter += 1;
+ ty::Region::new_late_bound(tcx, current_depth, br)
+ }
+ r => bug!("unexpected region: {r:?}"),
+ })
})
}
- ty
+ bty.instantiate(tcx, args)
})
.collect();
if considering_regions {
debug_assert!(!hidden_types.has_erased_regions());
}
- let bound_vars = tcx.mk_bound_variable_kinds_from_iter(bound_vars.iter().chain(
- (num_bound_variables..counter).map(|_| ty::BoundVariableKind::Region(ty::BrAnon(None))),
- ));
+ let bound_vars =
+ tcx.mk_bound_variable_kinds_from_iter(bound_vars.iter().chain(
+ (num_bound_variables..counter).map(|_| ty::BoundVariableKind::Region(ty::BrAnon)),
+ ));
ty::Binder::bind_with_vars(hidden_types, bound_vars)
}
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
index 729cf2f33..efab29743 100644
--- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -472,17 +472,11 @@ pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Opti
let mut types_without_default_bounds = FxIndexSet::default();
let sized_trait = tcx.lang_items().sized_trait();
- if !args.is_empty() {
+ let arg_names = args.iter().map(|k| k.to_string()).filter(|k| k != "'_").collect::<Vec<_>>();
+ if !arg_names.is_empty() {
types_without_default_bounds.extend(args.types());
w.push('<');
- w.push_str(
- &args
- .iter()
- .map(|k| k.to_string())
- .filter(|k| k != "'_")
- .collect::<Vec<_>>()
- .join(", "),
- );
+ w.push_str(&arg_names.join(", "));
w.push('>');
}
diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs
index 0864e4dc8..fc9b42436 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_match.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs
@@ -79,7 +79,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for Search<'tcx> {
ty::Closure(..) => {
return ControlFlow::Break(ty);
}
- ty::Generator(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) => {
+ ty::Generator(..) | ty::GeneratorWitness(..) => {
return ControlFlow::Break(ty);
}
ty::FnDef(..) => {
diff --git a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
index d3c4dc459..9d6be7689 100644
--- a/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
+++ b/compiler/rustc_trait_selection/src/traits/structural_normalize.rs
@@ -22,9 +22,14 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> {
assert!(!ty.is_ty_var(), "should have resolved vars before calling");
if self.infcx.next_trait_solver() {
- while let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) =
- *ty.kind()
- {
+ // FIXME(-Ztrait-solver=next): correctly handle
+ // overflow here.
+ for _ in 0..256 {
+ let ty::Alias(ty::Projection | ty::Inherent | ty::Weak, projection_ty) = *ty.kind()
+ else {
+ break;
+ };
+
let new_infer_ty = self.infcx.next_ty_var(TypeVariableOrigin {
kind: TypeVariableOriginKind::NormalizeProjectionType,
span: self.cause.span,
@@ -49,6 +54,7 @@ impl<'tcx> StructurallyNormalizeExt<'tcx> for At<'_, 'tcx> {
break;
}
}
+
Ok(ty)
} else {
Ok(self.normalize(ty).into_value_registering_obligations(self.infcx, fulfill_cx))
diff --git a/compiler/rustc_trait_selection/src/traits/vtable.rs b/compiler/rustc_trait_selection/src/traits/vtable.rs
index 427ac3684..e41073937 100644
--- a/compiler/rustc_trait_selection/src/traits/vtable.rs
+++ b/compiler/rustc_trait_selection/src/traits/vtable.rs
@@ -152,7 +152,7 @@ fn prepare_vtable_segments_inner<'tcx, T>(
while let Some((inner_most_trait_ref, emit_vptr, mut siblings)) = stack.pop() {
segment_visitor(VtblSegment::TraitOwnEntries {
trait_ref: inner_most_trait_ref,
- emit_vptr,
+ emit_vptr: emit_vptr && !tcx.sess.opts.unstable_opts.no_trait_vptr,
})?;
// If we've emitted (fed to `segment_visitor`) a trait that has methods present in the vtable,
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
index f26310665..b04008d9e 100644
--- a/compiler/rustc_trait_selection/src/traits/wf.rs
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -609,7 +609,6 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> {
| ty::Error(_)
| ty::Str
| ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Param(_)
| ty::Bound(..)
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index 37e00c0e4..0cdc978a3 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -8,9 +8,6 @@ tracing = "0.1"
rustc_middle = { path = "../rustc_middle" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_hir = { path = "../rustc_hir" }
-rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
rustc_infer = { path = "../rustc_infer" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
index 074764f0c..f40c3614e 100644
--- a/compiler/rustc_traits/src/dropck_outlives.rs
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -34,6 +34,7 @@ pub(crate) fn adt_dtorck_constraint(
) -> Result<&DropckConstraint<'_>, NoSolution> {
let def = tcx.adt_def(def_id);
let span = tcx.def_span(def_id);
+ let param_env = tcx.param_env(def_id);
debug!("dtorck_constraint: {:?}", def);
if def.is_manually_drop() {
@@ -55,7 +56,7 @@ pub(crate) fn adt_dtorck_constraint(
let mut result = DropckConstraint::empty();
for field in def.all_fields() {
let fty = tcx.type_of(field.did).instantiate_identity();
- dtorck_constraint_for_ty_inner(tcx, span, fty, 0, fty, &mut result)?;
+ dtorck_constraint_for_ty_inner(tcx, param_env, span, 0, fty, &mut result)?;
}
result.outlives.extend(tcx.destructor_constraints(def));
dedup_dtorck_constraint(&mut result);
diff --git a/compiler/rustc_traits/src/normalize_projection_ty.rs b/compiler/rustc_traits/src/normalize_projection_ty.rs
index 0dbac56b4..01bb1ca70 100644
--- a/compiler/rustc_traits/src/normalize_projection_ty.rs
+++ b/compiler/rustc_traits/src/normalize_projection_ty.rs
@@ -3,10 +3,13 @@ use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::query::Providers;
use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
use rustc_trait_selection::infer::InferCtxtBuilderExt;
+use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt;
use rustc_trait_selection::traits::query::{
normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
};
-use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
+use rustc_trait_selection::traits::{
+ self, FulfillmentErrorCode, ObligationCause, SelectionContext,
+};
use std::sync::atomic::Ordering;
pub(crate) fn provide(p: &mut Providers) {
@@ -40,6 +43,27 @@ fn normalize_projection_ty<'tcx>(
&mut obligations,
);
ocx.register_obligations(obligations);
+ // #112047: With projections and opaques, we are able to create opaques that
+ // are recursive (given some substitution of the opaque's type variables).
+ // In that case, we may only realize a cycle error when calling
+ // `normalize_erasing_regions` in mono.
+ if !ocx.infcx.next_trait_solver() {
+ let errors = ocx.select_where_possible();
+ if !errors.is_empty() {
+ // Rustdoc may attempt to normalize type alias types which are not
+ // well-formed. Rustdoc also normalizes types that are just not
+ // well-formed, since we don't do as much HIR analysis (checking
+ // that impl vars are constrained by the signature, for example).
+ if !tcx.sess.opts.actually_rustdoc {
+ for error in &errors {
+ if let FulfillmentErrorCode::CodeCycle(cycle) = &error.code {
+ ocx.infcx.err_ctxt().report_overflow_obligation_cycle(cycle);
+ }
+ }
+ }
+ return Err(NoSolution);
+ }
+ }
// FIXME(associated_const_equality): All users of normalize_projection_ty expected
// a type, but there is the possibility it could've been a const now. Maybe change
// it to a Term later?
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index e8ddb0a43..49f24f66b 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -189,6 +189,8 @@ pub(crate) mod rustc {
Unspecified,
/// This error will be surfaced elsewhere by rustc, so don't surface it.
UnknownLayout,
+ /// The computed size overflows
+ SizeOverflow,
TypeError(ErrorGuaranteed),
}
@@ -196,6 +198,7 @@ pub(crate) mod rustc {
fn from(err: &LayoutError<'tcx>) -> Self {
match err {
LayoutError::Unknown(..) | LayoutError::ReferencesError(..) => Self::UnknownLayout,
+ LayoutError::SizeOverflow(..) => Self::SizeOverflow,
err => unimplemented!("{:?}", err),
}
}
diff --git a/compiler/rustc_transmute/src/lib.rs b/compiler/rustc_transmute/src/lib.rs
index 05ad4a4a1..6c49e94dc 100644
--- a/compiler/rustc_transmute/src/lib.rs
+++ b/compiler/rustc_transmute/src/lib.rs
@@ -64,6 +64,10 @@ pub enum Reason {
SrcLayoutUnknown,
/// The layout of dst is unknown
DstLayoutUnknown,
+ /// The size of src overflows
+ SrcSizeOverflow,
+ /// The size of dst overflows
+ DstSizeOverflow,
}
#[cfg(feature = "rustc")]
@@ -125,19 +129,16 @@ mod rustc {
c: Const<'tcx>,
) -> Option<Self> {
use rustc_middle::ty::ScalarInt;
- use rustc_middle::ty::TypeVisitableExt;
use rustc_span::symbol::sym;
- let c = c.eval(tcx, param_env);
-
- if let Err(err) = c.error_reported() {
+ let Ok(cv) = c.eval(tcx, param_env, None) else {
return Some(Self {
alignment: true,
lifetimes: true,
safety: true,
validity: true,
});
- }
+ };
let adt_def = c.ty().ty_adt_def()?;
@@ -149,8 +150,8 @@ mod rustc {
);
let variant = adt_def.non_enum_variant();
- let fields = match c.try_to_valtree() {
- Some(ValTree::Branch(branch)) => branch,
+ let fields = match cv {
+ ValTree::Branch(branch) => branch,
_ => {
return Some(Self {
alignment: true,
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
index b223a90f7..c0141f1f8 100644
--- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -85,6 +85,8 @@ mod rustc {
(_, Err(Err::UnknownLayout)) => Answer::No(Reason::DstLayoutUnknown),
(Err(Err::Unspecified), _) => Answer::No(Reason::SrcIsUnspecified),
(_, Err(Err::Unspecified)) => Answer::No(Reason::DstIsUnspecified),
+ (Err(Err::SizeOverflow), _) => Answer::No(Reason::SrcSizeOverflow),
+ (_, Err(Err::SizeOverflow)) => Answer::No(Reason::DstSizeOverflow),
(Ok(src), Ok(dst)) => {
MaybeTransmutableQuery { src, dst, scope, assume, context }.answer()
}
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 4d0b84753..16183403d 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -172,7 +172,10 @@ fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
use rustc_target::spec::abi::Abi::*;
match tcx.sess.target.adjust_abi(abi) {
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
- RustCold => Conv::RustCold,
+
+ // This is intentionally not using `Conv::Cold`, as that has to preserve
+ // even SIMD registers, which is generally not a good trade-off.
+ RustCold => Conv::PreserveMost,
// It's the ABI's job to select this, not ours.
System { .. } => bug!("system abi should be selected elsewhere"),
@@ -517,6 +520,8 @@ fn fn_abi_adjust_for_abi<'tcx>(
_ => return,
}
+ // `Aggregate` ABI must be adjusted to ensure that ABI-compatible Rust types are passed
+ // the same way.
let size = arg.layout.size;
if arg.layout.is_unsized() || size > Pointer(AddressSpace::DATA).size(cx) {
@@ -583,19 +588,11 @@ fn make_thin_self_ptr<'tcx>(
// To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
// get a built-in pointer type
let mut fat_pointer_layout = layout;
- 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
- && !fat_pointer_layout.ty.is_ref()
- {
- for i in 0..fat_pointer_layout.fields.count() {
- let field_layout = fat_pointer_layout.field(cx, i);
-
- if !field_layout.is_zst() {
- fat_pointer_layout = field_layout;
- continue 'descend_newtypes;
- }
- }
-
- bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+ while !fat_pointer_layout.ty.is_unsafe_ptr() && !fat_pointer_layout.ty.is_ref() {
+ fat_pointer_layout = fat_pointer_layout
+ .non_1zst_field(cx)
+ .expect("not exactly one non-1-ZST field in a `DispatchFromDyn` type")
+ .1
}
fat_pointer_layout.ty
diff --git a/compiler/rustc_ty_utils/src/implied_bounds.rs b/compiler/rustc_ty_utils/src/implied_bounds.rs
index 436f10a4f..ec2e0daaf 100644
--- a/compiler/rustc_ty_utils/src/implied_bounds.rs
+++ b/compiler/rustc_ty_utils/src/implied_bounds.rs
@@ -119,7 +119,7 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
},
DefKind::AssocConst | DefKind::AssocTy => tcx.assumed_wf_types(tcx.local_parent(def_id)),
DefKind::OpaqueTy => match tcx.def_kind(tcx.local_parent(def_id)) {
- DefKind::TyAlias { .. } => ty::List::empty(),
+ DefKind::TyAlias => ty::List::empty(),
DefKind::AssocTy => tcx.assumed_wf_types(tcx.local_parent(def_id)),
// Nested opaque types only occur in associated types:
// ` type Opaque<T> = impl Trait<&'static T, AssocTy = impl Nested>; `
@@ -136,7 +136,7 @@ fn assumed_wf_types<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx [(Ty<'
| DefKind::Enum
| DefKind::Variant
| DefKind::Trait
- | DefKind::TyAlias { .. }
+ | DefKind::TyAlias
| DefKind::ForeignTy
| DefKind::TraitAlias
| DefKind::TyParam
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
index e1a15b5cf..91f1c2131 100644
--- a/compiler/rustc_ty_utils/src/instance.rs
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -55,6 +55,7 @@ fn resolve_instance<'tcx>(
}
} else {
debug!(" => free item");
+ // FIXME(effects): we may want to erase the effect param if that is present on this item.
ty::InstanceDef::Item(def_id)
};
@@ -140,11 +141,34 @@ fn resolve_associated_item<'tcx>(
false
}
};
-
if !eligible {
return Ok(None);
}
+ // HACK: We may have overlapping `dyn Trait` built-in impls and
+ // user-provided blanket impls. Detect that case here, and return
+ // ambiguity.
+ //
+ // This should not affect totally monomorphized contexts, only
+ // resolve calls that happen polymorphically, such as the mir-inliner
+ // and const-prop (and also some lints).
+ let self_ty = rcvr_args.type_at(0);
+ if !self_ty.is_known_rigid() {
+ let predicates = tcx
+ .predicates_of(impl_data.impl_def_id)
+ .instantiate(tcx, impl_data.args)
+ .predicates;
+ let sized_def_id = tcx.lang_items().sized_trait();
+ // If we find a `Self: Sized` bound on the item, then we know
+ // that `dyn Trait` can certainly never apply here.
+ if !predicates.into_iter().filter_map(ty::Clause::as_trait_clause).any(|clause| {
+ Some(clause.def_id()) == sized_def_id
+ && clause.skip_binder().self_ty() == self_ty
+ }) {
+ return Ok(None);
+ }
+ }
+
// Any final impl is required to define all associated items.
if !leaf_def.item.defaultness(tcx).has_value() {
let guard = tcx.sess.delay_span_bug(
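
A minimal sketch (hypothetical trait names) of the overlap the new check guards against: a user-written blanket impl also covers `dyn Trait`, so resolving the method polymorphically, e.g. in the MIR inliner, would be ambiguous with the built-in object impl.

    trait Greet {
        fn greet(&self) -> &'static str;
    }

    // Blanket impl that also applies to `dyn Greet` itself, overlapping with the
    // built-in vtable-based impl for trait objects.
    impl<T: ?Sized> Greet for T {
        fn greet(&self) -> &'static str {
            "hello"
        }
    }

    fn main() {
        let x: &dyn Greet = &42_u32;
        // At runtime this is fine; the ambiguity only matters for polymorphic
        // resolution, which now returns `Ok(None)` instead of picking one impl.
        println!("{}", x.greet());
    }
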
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 6b4273c03..5bd68d7cc 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -7,6 +7,7 @@ use rustc_middle::query::Providers;
use rustc_middle::ty::layout::{
IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
+use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{
self, AdtDef, EarlyBinder, GenericArgsRef, ReprOptions, Ty, TyCtxt, TypeVisitableExt,
};
@@ -35,6 +36,9 @@ fn layout_of<'tcx>(
let (param_env, ty) = query.into_parts();
debug!(?ty);
+ // Optimization: We convert to RevealAll and convert opaque types in the where bounds
+ // to their hidden types. This reduces overall uncached invocations of `layout_of` and
+ // is thus a small performance improvement.
let param_env = param_env.with_reveal_all_normalized(tcx);
let unnormalized_ty = ty;
@@ -192,7 +196,7 @@ fn layout_of_uncached<'tcx>(
let metadata_layout = cx.layout_of(metadata_ty)?;
// If the metadata is a 1-zst, then the pointer is thin.
- if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
+ if metadata_layout.is_1zst() {
return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
}
@@ -573,11 +577,7 @@ fn layout_of_uncached<'tcx>(
return Err(error(cx, LayoutError::Unknown(ty)));
}
- ty::Bound(..)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
- | ty::Infer(_)
- | ty::Error(_) => {
+ ty::Bound(..) | ty::GeneratorWitness(..) | ty::Infer(_) | ty::Error(_) => {
bug!("Layout::compute: unexpected type `{}`", ty)
}
@@ -937,7 +937,7 @@ fn record_layout_for_printing_outlined<'tcx>(
// (delay format until we actually need it)
let record = |kind, packed, opt_discr_size, variants| {
- let type_desc = format!("{:?}", layout.ty);
+ let type_desc = with_no_trimmed_paths!(format!("{}", layout.ty));
cx.tcx.sess.code_stats.record_type_size(
kind,
type_desc,
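
The `is_1zst` change above concerns pointer metadata: a pointer is thin exactly when its metadata is a 1-ZST such as `()`. A standalone illustration of the layouts involved:

    use std::mem::size_of;

    fn main() {
        // `*const u8` has metadata `()`, a 1-ZST, so the pointer is thin.
        assert_eq!(size_of::<*const u8>(), size_of::<usize>());
        // `*const [u8]` has `usize` metadata (the length), so the pointer is fat.
        assert_eq!(size_of::<*const [u8]>(), 2 * size_of::<usize>());
    }
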
diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs
index 1fc5d9359..51a6d6235 100644
--- a/compiler/rustc_ty_utils/src/needs_drop.rs
+++ b/compiler/rustc_ty_utils/src/needs_drop.rs
@@ -7,7 +7,7 @@ use rustc_middle::ty::util::{needs_drop_components, AlwaysRequiresDrop};
use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
use rustc_session::Limit;
-use rustc_span::{sym, DUMMY_SP};
+use rustc_span::sym;
use crate::errors::NeedsDropOverflow;
@@ -66,6 +66,9 @@ fn has_significant_drop_raw<'tcx>(
struct NeedsDropTypes<'tcx, F> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
+ // Whether to reveal coroutine witnesses; this is set
+ // to `false` unless we compute `needs_drop` for a coroutine witness.
+ reveal_coroutine_witnesses: bool,
query_ty: Ty<'tcx>,
seen_tys: FxHashSet<Ty<'tcx>>,
/// A stack of types left to process, and the recursion depth when we
@@ -89,6 +92,7 @@ impl<'tcx, F> NeedsDropTypes<'tcx, F> {
Self {
tcx,
param_env,
+ reveal_coroutine_witnesses: false,
seen_tys,
query_ty: ty,
unchecked_tys: vec![(ty, 0)],
@@ -133,8 +137,31 @@ where
// The information required to determine whether a generator has drop is
// computed on MIR, while this very method is used to build MIR.
// To avoid cycles, we consider that generators always require drop.
- ty::Generator(..) if tcx.sess.opts.unstable_opts.drop_tracking_mir => {
- return Some(Err(AlwaysRequiresDrop));
+ //
+ // HACK: Because we erase regions contained in the coroutine witness, we
+ // have to conservatively assume that every region captured by the
+ // coroutine has to be live when dropped. This results in a lot of
+ // undesirable borrowck errors. During borrowck, we call `needs_drop`
+ // for the coroutine witness and check whether any of the contained types
+ // need to be dropped, and only require the captured types to be live
+ // if they do.
+ ty::Generator(_, args, _) => {
+ if self.reveal_coroutine_witnesses {
+ queue_type(self, args.as_generator().witness());
+ } else {
+ return Some(Err(AlwaysRequiresDrop));
+ }
+ }
+ ty::GeneratorWitness(def_id, args) => {
+ if let Some(witness) = tcx.mir_generator_witnesses(def_id) {
+ self.reveal_coroutine_witnesses = true;
+ for field_ty in &witness.field_tys {
+ queue_type(
+ self,
+ EarlyBinder::bind(field_ty.ty).instantiate(tcx, args),
+ );
+ }
+ }
}
_ if component.is_copy_modulo_regions(tcx, self.param_env) => (),
@@ -145,29 +172,6 @@ where
}
}
- ty::Generator(def_id, args, _) => {
- let args = args.as_generator();
- for upvar in args.upvar_tys() {
- queue_type(self, upvar);
- }
-
- let witness = args.witness();
- let interior_tys = match witness.kind() {
- &ty::GeneratorWitness(tys) => tcx.erase_late_bound_regions(tys),
- _ => {
- tcx.sess.delay_span_bug(
- tcx.hir().span_if_local(def_id).unwrap_or(DUMMY_SP),
- format!("unexpected generator witness type {witness:?}"),
- );
- return Some(Err(AlwaysRequiresDrop));
- }
- };
-
- for interior_ty in interior_tys {
- queue_type(self, interior_ty);
- }
- }
-
// Check for a `Drop` impl and whether this is a union or
// `ManuallyDrop`. If it's a struct or enum without a `Drop`
// impl then check whether the field types need `Drop`.
@@ -214,8 +218,6 @@ where
| ty::FnPtr(..)
| ty::Tuple(_)
| ty::Bound(..)
- | ty::GeneratorWitness(..)
- | ty::GeneratorWitnessMIR(..)
| ty::Never
| ty::Infer(_)
| ty::Error(_) => {
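
The behaviour this query computes is observable through `std::mem::needs_drop`; a quick reminder of what it answers:

    fn main() {
        // `String` owns heap memory, so it needs drop glue.
        assert!(std::mem::needs_drop::<String>());
        // A tuple of `Copy` data does not.
        assert!(!std::mem::needs_drop::<(u32, &str)>());
        // For suspended coroutine locals, the witness decides the answer, which is
        // what the `reveal_coroutine_witnesses` flag above controls.
    }
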
diff --git a/compiler/rustc_ty_utils/src/opaque_types.rs b/compiler/rustc_ty_utils/src/opaque_types.rs
index 38768f0a0..06a30677d 100644
--- a/compiler/rustc_ty_utils/src/opaque_types.rs
+++ b/compiler/rustc_ty_utils/src/opaque_types.rs
@@ -53,9 +53,7 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
fn parent(&self) -> Option<LocalDefId> {
match self.tcx.def_kind(self.item) {
- DefKind::AnonConst | DefKind::InlineConst | DefKind::Fn | DefKind::TyAlias { .. } => {
- None
- }
+ DefKind::AnonConst | DefKind::InlineConst | DefKind::Fn | DefKind::TyAlias => None,
DefKind::AssocFn | DefKind::AssocTy | DefKind::AssocConst => {
Some(self.tcx.local_parent(self.item))
}
@@ -118,7 +116,7 @@ impl<'tcx> OpaqueTypeCollector<'tcx> {
#[instrument(level = "trace", skip(self))]
fn visit_nested_item(&mut self, id: rustc_hir::ItemId) {
let id = id.owner_id.def_id;
- if let DefKind::TyAlias { .. } = self.collector.tcx.def_kind(id) {
+ if let DefKind::TyAlias = self.collector.tcx.def_kind(id) {
let items = self.collector.tcx.opaque_types_defined_by(id);
self.collector.opaques.extend(items);
}
@@ -297,7 +295,7 @@ fn opaque_types_defined_by<'tcx>(tcx: TyCtxt<'tcx>, item: LocalDefId) -> &'tcx [
collector.collect_body_and_predicate_taits();
}
// We're also doing this for `AssocTy` for the wf checks in `check_opaque_meets_bounds`
- DefKind::TyAlias { .. } | DefKind::AssocTy => {
+ DefKind::TyAlias | DefKind::AssocTy => {
tcx.type_of(item).instantiate_identity().visit_with(&mut collector);
}
DefKind::OpaqueTy => {
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
index ba0258b63..4234e69e8 100644
--- a/compiler/rustc_ty_utils/src/ty.rs
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -4,7 +4,7 @@ use rustc_hir::def::DefKind;
use rustc_index::bit_set::BitSet;
use rustc_middle::query::Providers;
use rustc_middle::ty::{
- self, EarlyBinder, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+ self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
};
use rustc_span::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
use rustc_span::DUMMY_SP;
@@ -21,13 +21,7 @@ fn sized_constraint_for_ty<'tcx>(
Bool | Char | Int(..) | Uint(..) | Float(..) | RawPtr(..) | Ref(..) | FnDef(..)
| FnPtr(_) | Array(..) | Closure(..) | Generator(..) | Never => vec![],
- Str
- | Dynamic(..)
- | Slice(_)
- | Foreign(..)
- | Error(_)
- | GeneratorWitness(..)
- | GeneratorWitnessMIR(..) => {
+ Str | Dynamic(..) | Slice(_) | Foreign(..) | Error(_) | GeneratorWitness(..) => {
// these are never sized - return the target type
vec![ty]
}
@@ -220,13 +214,10 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ImplTraitInTraitFinder<'_, 'tcx> {
// strategy, then just reinterpret the associated type like an opaque :^)
let default_ty = self.tcx.type_of(shifted_alias_ty.def_id).instantiate(self.tcx, shifted_alias_ty.args);
- self.predicates.push(
- ty::Binder::bind_with_vars(
- ty::ProjectionPredicate { projection_ty: shifted_alias_ty, term: default_ty.into() },
- self.bound_vars,
- )
- .to_predicate(self.tcx),
- );
+ self.predicates.push(ty::Clause::from_projection_clause(self.tcx, ty::Binder::bind_with_vars(
+ ty::ProjectionPredicate { projection_ty: shifted_alias_ty, term: default_ty.into() },
+ self.bound_vars,
+ )));
// We walk the *un-shifted* alias ty, because we're tracking the de bruijn
// binder depth, and if we were to walk `shifted_alias_ty` instead, we'd
@@ -299,9 +290,12 @@ fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<EarlyBinder<Ty<'
}
/// Check if a function is async.
-fn asyncness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::IsAsync {
+fn asyncness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Asyncness {
let node = tcx.hir().get_by_def_id(def_id);
- node.fn_sig().map_or(hir::IsAsync::NotAsync, |sig| sig.header.asyncness)
+ node.fn_sig().map_or(ty::Asyncness::No, |sig| match sig.header.asyncness {
+ hir::IsAsync::Async(_) => ty::Asyncness::Yes,
+ hir::IsAsync::NotAsync => ty::Asyncness::No,
+ })
}
fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> BitSet<u32> {
diff --git a/compiler/rustc_type_ir/src/fold.rs b/compiler/rustc_type_ir/src/fold.rs
index 371c61191..e7a6831f5 100644
--- a/compiler/rustc_type_ir/src/fold.rs
+++ b/compiler/rustc_type_ir/src/fold.rs
@@ -11,7 +11,7 @@
//! modification. These are the ones containing the most important type-related
//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
//!
-//! There are three groups of traits involved in each traversal.
+//! There are three traits involved in each traversal.
//! - `TypeFoldable`. This is implemented once for many types, including:
//! - Types of interest, for which the methods delegate to the folder.
//! - All other types, including generic containers like `Vec` and `Option`.
@@ -51,6 +51,12 @@ use crate::{visit::TypeVisitable, Interner};
///
/// To implement this conveniently, use the derive macro located in
/// `rustc_macros`.
+///
+/// This trait is a sub-trait of `TypeVisitable`. This is because many
+/// `TypeFolder` instances use the methods in `TypeVisitableExt` while folding,
+/// which means in practice almost every foldable type needs to also be
+/// visitable. (However, there are some types that are visitable without being
+/// foldable.)
pub trait TypeFoldable<I: Interner>: TypeVisitable<I> {
/// The entry point for folding. To fold a value `t` with a folder `f`
/// call: `t.try_fold_with(f)`.
@@ -58,7 +64,7 @@ pub trait TypeFoldable<I: Interner>: TypeVisitable<I> {
/// For most types, this just traverses the value, calling `try_fold_with`
/// on each field/element.
///
- /// For types of interest (such as `Ty`), the implementation of method
+ /// For types of interest (such as `Ty`), the implementation of this method
/// calls a folder method specifically for that type (such as
/// `F::try_fold_ty`). This is where control transfers from `TypeFoldable`
/// to `TypeFolder`.
@@ -121,7 +127,7 @@ pub trait TypeFolder<I: Interner>: FallibleTypeFolder<I, Error = !> {
}
// The default region folder is a no-op because `Region` is non-recursive
- // and has no `super_visit_with` method to call. That also explains the
+ // and has no `super_fold_with` method to call. That also explains the
// lack of `I::Region: TypeSuperFoldable<I>` bound on this method.
fn fold_region(&mut self, r: I::Region) -> I::Region {
r
@@ -170,7 +176,7 @@ pub trait FallibleTypeFolder<I: Interner>: Sized {
}
// The default region folder is a no-op because `Region` is non-recursive
- // and has no `super_visit_with` method to call. That also explains the
+ // and has no `super_fold_with` method to call. That also explains the
// lack of `I::Region: TypeSuperFoldable<I>` bound on this method.
fn try_fold_region(&mut self, r: I::Region) -> Result<I::Region, Self::Error> {
Ok(r)
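
A standalone toy (not the rustc API) of the three-trait split the module docs now describe: `Foldable` types drive the traversal, and a `Folder` decides what happens at the "types of interest".

    // Toy "type of interest": plain u32 leaves inside arbitrary containers.
    trait Folder {
        fn fold_leaf(&mut self, leaf: u32) -> u32;
    }

    trait Foldable: Sized {
        fn fold_with<F: Folder>(self, folder: &mut F) -> Self;
    }

    impl Foldable for u32 {
        // Control transfers from Foldable to Folder at the type of interest.
        fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
            folder.fold_leaf(self)
        }
    }

    impl<T: Foldable> Foldable for Vec<T> {
        // Containers just traverse their contents.
        fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
            self.into_iter().map(|x| x.fold_with(folder)).collect()
        }
    }

    struct AddOne;
    impl Folder for AddOne {
        fn fold_leaf(&mut self, leaf: u32) -> u32 {
            leaf + 1
        }
    }

    fn main() {
        assert_eq!(vec![1u32, 2, 3].fold_with(&mut AddOne), vec![2, 3, 4]);
    }
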
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
index b0f8ea7a0..83401b65c 100644
--- a/compiler/rustc_type_ir/src/lib.rs
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -6,7 +6,7 @@
#![feature(unwrap_infallible)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
#[macro_use]
extern crate bitflags;
@@ -41,7 +41,12 @@ pub trait HashStableContext {}
pub trait Interner: Sized {
type AdtDef: Clone + Debug + Hash + Ord;
- type GenericArgsRef: Clone + DebugWithInfcx<Self> + Hash + Ord;
+ type GenericArgsRef: Clone
+ + DebugWithInfcx<Self>
+ + Hash
+ + Ord
+ + IntoIterator<Item = Self::GenericArg>;
+ type GenericArg: Clone + DebugWithInfcx<Self> + Hash + Ord;
type DefId: Clone + Debug + Hash + Ord;
type Binder<T>;
type Ty: Clone + DebugWithInfcx<Self> + Hash + Ord;
@@ -294,6 +299,9 @@ bitflags! {
/// Does this have `Generator` or `GeneratorWitness`?
const HAS_TY_GENERATOR = 1 << 23;
+
+ /// Does this have any binders with bound vars (e.g. that need to be anonymized)?
+ const HAS_BINDER_VARS = 1 << 24;
}
}
@@ -574,16 +582,16 @@ rustc_index::newtype_index! {
pub struct TyVid {}
}
-/// An **int**egral (`u32`, `i32`, `usize`, etc.) type **v**ariable **ID**.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-pub struct IntVid {
- pub index: u32,
+rustc_index::newtype_index! {
+ /// An **int**egral (`u32`, `i32`, `usize`, etc.) type **v**ariable **ID**.
+ #[debug_format = "?{}i"]
+ pub struct IntVid {}
}
-/// An **float**ing-point (`f32` or `f64`) type **v**ariable **ID**.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-pub struct FloatVid {
- pub index: u32,
+rustc_index::newtype_index! {
+ /// A **float**ing-point (`f32` or `f64`) type **v**ariable **ID**.
+ #[debug_format = "?{}f"]
+ pub struct FloatVid {}
}
/// A placeholder for a type that hasn't been inferred yet.
@@ -645,11 +653,11 @@ impl UnifyKey for IntVid {
type Value = Option<IntVarValue>;
#[inline] // make this function eligible for inlining - it is quite hot.
fn index(&self) -> u32 {
- self.index
+ self.as_u32()
}
#[inline]
fn from_index(i: u32) -> IntVid {
- IntVid { index: i }
+ IntVid::from_u32(i)
}
fn tag() -> &'static str {
"IntVid"
@@ -662,11 +670,11 @@ impl UnifyKey for FloatVid {
type Value = Option<FloatVarValue>;
#[inline]
fn index(&self) -> u32 {
- self.index
+ self.as_u32()
}
#[inline]
fn from_index(i: u32) -> FloatVid {
- FloatVid { index: i }
+ FloatVid::from_u32(i)
}
fn tag() -> &'static str {
"FloatVid"
@@ -770,18 +778,6 @@ impl fmt::Debug for FloatVarValue {
}
}
-impl fmt::Debug for IntVid {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "?{}i", self.index)
- }
-}
-
-impl fmt::Debug for FloatVid {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "?{}f", self.index)
- }
-}
-
impl fmt::Debug for Variance {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match *self {
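
A simplified sketch of what the `newtype_index!` invocations above generate for `IntVid` (the real macro also emits encoding and index-trait impls), showing where `#[debug_format = "?{}i"]` ends up:

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct IntVid {
        private: u32,
    }

    impl IntVid {
        pub const fn from_u32(value: u32) -> Self {
            IntVid { private: value }
        }
        pub const fn as_u32(self) -> u32 {
            self.private
        }
    }

    // `#[debug_format = "?{}i"]` generates a Debug impl like this one, replacing
    // the hand-written impls removed further down in this file.
    impl std::fmt::Debug for IntVid {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "?{}i", self.private)
        }
    }

    fn main() {
        assert_eq!(format!("{:?}", IntVid::from_u32(3)), "?3i");
    }
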
diff --git a/compiler/rustc_type_ir/src/sty.rs b/compiler/rustc_type_ir/src/sty.rs
index 72bd50ace..f19e9935f 100644
--- a/compiler/rustc_type_ir/src/sty.rs
+++ b/compiler/rustc_type_ir/src/sty.rs
@@ -146,37 +146,11 @@ pub enum TyKind<I: Interner> {
/// A type representing the types stored inside a generator.
/// This should only appear as part of the `GeneratorArgs`.
///
- /// Note that the captured variables for generators are stored separately
- /// using a tuple in the same way as for closures.
- ///
- /// Unlike upvars, the witness can reference lifetimes from
- /// inside of the generator itself. To deal with them in
- /// the type of the generator, we convert them to higher ranked
- /// lifetimes bound by the witness itself.
- ///
- /// Looking at the following example, the witness for this generator
- /// may end up as something like `for<'a> [Vec<i32>, &'a Vec<i32>]`:
- ///
- /// ```ignore UNSOLVED (ask @compiler-errors, should this error? can we just swap the yields?)
- /// #![feature(generators)]
- /// |a| {
- /// let x = &vec![3];
- /// yield a;
- /// yield x[0];
- /// }
- /// # ;
- /// ```
- GeneratorWitness(I::BinderListTy),
-
- /// A type representing the types stored inside a generator.
- /// This should only appear as part of the `GeneratorArgs`.
- ///
/// Unlike upvars, the witness can reference lifetimes from
/// inside of the generator itself. To deal with them in
/// the type of the generator, we convert them to higher ranked
/// lifetimes bound by the witness itself.
///
- /// This variant is only using when `drop_tracking_mir` is set.
/// This contains the `DefId` and the `GenericArgsRef` of the generator.
/// The actual witness types are computed on MIR by the `mir_generator_witnesses` query.
///
@@ -192,7 +166,7 @@ pub enum TyKind<I: Interner> {
/// }
/// # ;
/// ```
- GeneratorWitnessMIR(I::DefId, I::GenericArgsRef),
+ GeneratorWitness(I::DefId, I::GenericArgsRef),
/// The never type `!`.
Never,
@@ -278,7 +252,7 @@ const fn tykind_discriminant<I: Interner>(value: &TyKind<I>) -> usize {
Dynamic(..) => 14,
Closure(_, _) => 15,
Generator(_, _, _) => 16,
- GeneratorWitness(_) => 17,
+ GeneratorWitness(_, _) => 17,
Never => 18,
Tuple(_) => 19,
Alias(_, _) => 20,
@@ -287,7 +261,6 @@ const fn tykind_discriminant<I: Interner>(value: &TyKind<I>) -> usize {
Placeholder(_) => 23,
Infer(_) => 24,
Error(_) => 25,
- GeneratorWitnessMIR(_, _) => 26,
}
}
@@ -312,8 +285,7 @@ impl<I: Interner> Clone for TyKind<I> {
Dynamic(p, r, repr) => Dynamic(p.clone(), r.clone(), *repr),
Closure(d, s) => Closure(d.clone(), s.clone()),
Generator(d, s, m) => Generator(d.clone(), s.clone(), m.clone()),
- GeneratorWitness(g) => GeneratorWitness(g.clone()),
- GeneratorWitnessMIR(d, s) => GeneratorWitnessMIR(d.clone(), s.clone()),
+ GeneratorWitness(d, s) => GeneratorWitness(d.clone(), s.clone()),
Never => Never,
Tuple(t) => Tuple(t.clone()),
Alias(k, p) => Alias(*k, p.clone()),
@@ -355,10 +327,7 @@ impl<I: Interner> PartialEq for TyKind<I> {
(Generator(a_d, a_s, a_m), Generator(b_d, b_s, b_m)) => {
a_d == b_d && a_s == b_s && a_m == b_m
}
- (GeneratorWitness(a_g), GeneratorWitness(b_g)) => a_g == b_g,
- (GeneratorWitnessMIR(a_d, a_s), GeneratorWitnessMIR(b_d, b_s)) => {
- a_d == b_d && a_s == b_s
- }
+ (GeneratorWitness(a_d, a_s), GeneratorWitness(b_d, b_s)) => a_d == b_d && a_s == b_s,
(Tuple(a_t), Tuple(b_t)) => a_t == b_t,
(Alias(a_i, a_p), Alias(b_i, b_p)) => a_i == b_i && a_p == b_p,
(Param(a_p), Param(b_p)) => a_p == b_p,
@@ -415,10 +384,9 @@ impl<I: Interner> Ord for TyKind<I> {
(Generator(a_d, a_s, a_m), Generator(b_d, b_s, b_m)) => {
a_d.cmp(b_d).then_with(|| a_s.cmp(b_s).then_with(|| a_m.cmp(b_m)))
}
- (GeneratorWitness(a_g), GeneratorWitness(b_g)) => a_g.cmp(b_g),
(
- GeneratorWitnessMIR(a_d, a_s),
- GeneratorWitnessMIR(b_d, b_s),
+ GeneratorWitness(a_d, a_s),
+ GeneratorWitness(b_d, b_s),
) => match Ord::cmp(a_d, b_d) {
Ordering::Equal => Ord::cmp(a_s, b_s),
cmp => cmp,
@@ -483,8 +451,7 @@ impl<I: Interner> hash::Hash for TyKind<I> {
s.hash(state);
m.hash(state)
}
- GeneratorWitness(g) => g.hash(state),
- GeneratorWitnessMIR(d, s) => {
+ GeneratorWitness(d, s) => {
d.hash(state);
s.hash(state);
}
@@ -517,7 +484,21 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> {
Int(i) => write!(f, "{i:?}"),
Uint(u) => write!(f, "{u:?}"),
Float(float) => write!(f, "{float:?}"),
- Adt(d, s) => f.debug_tuple_field2_finish("Adt", d, &this.wrap(s)),
+ Adt(d, s) => {
+ write!(f, "{d:?}")?;
+ let mut s = s.clone().into_iter();
+ let first = s.next();
+ match first {
+ Some(first) => write!(f, "<{:?}", first)?,
+ None => return Ok(()),
+ };
+
+ for arg in s {
+ write!(f, ", {:?}", arg)?;
+ }
+
+ write!(f, ">")
+ }
Foreign(d) => f.debug_tuple_field1_finish("Foreign", d),
Str => write!(f, "str"),
Array(t, c) => write!(f, "[{:?}; {:?}]", &this.wrap(t), &this.wrap(c)),
@@ -544,28 +525,23 @@ impl<I: Interner> DebugWithInfcx<I> for TyKind<I> {
},
Closure(d, s) => f.debug_tuple_field2_finish("Closure", d, &this.wrap(s)),
Generator(d, s, m) => f.debug_tuple_field3_finish("Generator", d, &this.wrap(s), m),
- GeneratorWitness(g) => f.debug_tuple_field1_finish("GeneratorWitness", &this.wrap(g)),
- GeneratorWitnessMIR(d, s) => {
- f.debug_tuple_field2_finish("GeneratorWitnessMIR", d, &this.wrap(s))
+ GeneratorWitness(d, s) => {
+ f.debug_tuple_field2_finish("GeneratorWitness", d, &this.wrap(s))
}
Never => write!(f, "!"),
Tuple(t) => {
- let mut iter = t.clone().into_iter();
-
write!(f, "(")?;
-
- match iter.next() {
- None => return write!(f, ")"),
- Some(ty) => write!(f, "{:?}", &this.wrap(ty))?,
- };
-
- match iter.next() {
- None => return write!(f, ",)"),
- Some(ty) => write!(f, "{:?})", &this.wrap(ty))?,
+ let mut count = 0;
+ for ty in t.clone() {
+ if count > 0 {
+ write!(f, ", ")?;
+ }
+ write!(f, "{:?}", &this.wrap(ty))?;
+ count += 1;
}
-
- for ty in iter {
- write!(f, ", {:?}", &this.wrap(ty))?;
+ // unary tuples need a trailing comma
+ if count == 1 {
+ write!(f, ",")?;
}
write!(f, ")")
}
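
The rewritten `Tuple` arm above mirrors the standard library's tuple `Debug` output, including the trailing comma for unary tuples:

    fn main() {
        // The hand-rolled formatter above matches these standard `Debug` outputs.
        assert_eq!(format!("{:?}", ()), "()");
        assert_eq!(format!("{:?}", (1,)), "(1,)");
        assert_eq!(format!("{:?}", (1, 2)), "(1, 2)");
    }
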
@@ -668,10 +644,7 @@ where
args.encode(e);
m.encode(e);
}),
- GeneratorWitness(b) => e.emit_enum_variant(disc, |e| {
- b.encode(e);
- }),
- GeneratorWitnessMIR(def_id, args) => e.emit_enum_variant(disc, |e| {
+ GeneratorWitness(def_id, args) => e.emit_enum_variant(disc, |e| {
def_id.encode(e);
args.encode(e);
}),
@@ -748,7 +721,7 @@ where
14 => Dynamic(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
15 => Closure(Decodable::decode(d), Decodable::decode(d)),
16 => Generator(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
- 17 => GeneratorWitness(Decodable::decode(d)),
+ 17 => GeneratorWitness(Decodable::decode(d), Decodable::decode(d)),
18 => Never,
19 => Tuple(Decodable::decode(d)),
20 => Alias(Decodable::decode(d), Decodable::decode(d)),
@@ -757,12 +730,11 @@ where
23 => Placeholder(Decodable::decode(d)),
24 => Infer(Decodable::decode(d)),
25 => Error(Decodable::decode(d)),
- 26 => GeneratorWitnessMIR(Decodable::decode(d), Decodable::decode(d)),
_ => panic!(
"{}",
format!(
"invalid enum variant tag while decoding `{}`, expected 0..{}",
- "TyKind", 27,
+ "TyKind", 26,
)
),
}
@@ -856,10 +828,7 @@ where
args.hash_stable(__hcx, __hasher);
m.hash_stable(__hcx, __hasher);
}
- GeneratorWitness(b) => {
- b.hash_stable(__hcx, __hasher);
- }
- GeneratorWitnessMIR(def_id, args) => {
+ GeneratorWitness(def_id, args) => {
def_id.hash_stable(__hcx, __hasher);
args.hash_stable(__hcx, __hasher);
}
diff --git a/compiler/rustc_type_ir/src/visit.rs b/compiler/rustc_type_ir/src/visit.rs
index 878c7aec6..891a4dda2 100644
--- a/compiler/rustc_type_ir/src/visit.rs
+++ b/compiler/rustc_type_ir/src/visit.rs
@@ -8,7 +8,7 @@
//! visitation. These are the ones containing the most important type-related
//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
//!
-//! There are three groups of traits involved in each traversal.
+//! There are three traits involved in each traversal.
//! - `TypeVisitable`. This is implemented once for many types, including:
//! - Types of interest, for which the methods delegate to the visitor.
//! - All other types, including generic containers like `Vec` and `Option`.
@@ -17,7 +17,6 @@
//! interest, and defines the visiting "skeleton" for these types. (This
//! excludes `Region` because it is non-recursive, i.e. it never contains
//! other types of interest.)
-//!
//! - `TypeVisitor`. This is implemented for each visitor. This defines how
//! types of interest are visited.
//!
@@ -60,7 +59,7 @@ pub trait TypeVisitable<I: Interner>: fmt::Debug + Clone {
///
/// For types of interest (such as `Ty`), the implementation of this method
/// that calls a visitor method specifically for that type (such as
- /// `V::visit_ty`). This is where control transfers from `TypeFoldable` to
+ /// `V::visit_ty`). This is where control transfers from `TypeVisitable` to
/// `TypeVisitor`.
fn visit_with<V: TypeVisitor<I>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy>;
}
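
`visit_with` returns `ControlFlow<V::BreakTy>`; a minimal standalone reminder of how that enables early exit during a traversal:

    use std::ops::ControlFlow;

    // Stop at the first even number, mirroring how a TypeVisitor can break out
    // of a traversal as soon as it finds what it is looking for.
    fn find_even(xs: &[u32]) -> ControlFlow<u32> {
        for &x in xs {
            if x % 2 == 0 {
                return ControlFlow::Break(x);
            }
        }
        ControlFlow::Continue(())
    }

    fn main() {
        assert_eq!(find_even(&[1, 3, 4, 6]), ControlFlow::Break(4));
        assert_eq!(find_even(&[1, 3]), ControlFlow::Continue(()));
    }
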
diff --git a/compiler/stable_mir/Cargo.toml b/compiler/stable_mir/Cargo.toml
new file mode 100644
index 000000000..c61e217bf
--- /dev/null
+++ b/compiler/stable_mir/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "stable_mir"
+version = "0.1.0-preview"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+scoped-tls = "1.0"
diff --git a/compiler/rustc_smir/README.md b/compiler/stable_mir/README.md
index 31dee955f..31dee955f 100644
--- a/compiler/rustc_smir/README.md
+++ b/compiler/stable_mir/README.md
diff --git a/compiler/rustc_smir/rust-toolchain.toml b/compiler/stable_mir/rust-toolchain.toml
index d75e8e33b..d75e8e33b 100644
--- a/compiler/rustc_smir/rust-toolchain.toml
+++ b/compiler/stable_mir/rust-toolchain.toml
diff --git a/compiler/stable_mir/src/fold.rs b/compiler/stable_mir/src/fold.rs
new file mode 100644
index 000000000..6471b2c2a
--- /dev/null
+++ b/compiler/stable_mir/src/fold.rs
@@ -0,0 +1,245 @@
+use std::ops::ControlFlow;
+
+use crate::Opaque;
+
+use super::ty::{
+ Allocation, Binder, Const, ConstDef, ConstantKind, ExistentialPredicate, FnSig, GenericArgKind,
+ GenericArgs, Promoted, Region, RigidTy, TermKind, Ty, TyKind, UnevaluatedConst,
+};
+
+pub trait Folder: Sized {
+ type Break;
+ fn fold_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
+ ty.super_fold(self)
+ }
+ fn fold_const(&mut self, c: &Const) -> ControlFlow<Self::Break, Const> {
+ c.super_fold(self)
+ }
+ fn fold_reg(&mut self, reg: &Region) -> ControlFlow<Self::Break, Region> {
+ reg.super_fold(self)
+ }
+}
+
+pub trait Foldable: Sized + Clone {
+ fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ self.super_fold(folder)
+ }
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self>;
+}
+
+impl Foldable for Ty {
+ fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ folder.fold_ty(self)
+ }
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut kind = self.kind();
+ match &mut kind {
+ super::ty::TyKind::RigidTy(ty) => *ty = ty.fold(folder)?,
+ super::ty::TyKind::Alias(_, alias) => alias.args = alias.args.fold(folder)?,
+ super::ty::TyKind::Param(_) => {}
+ super::ty::TyKind::Bound(_, _) => {}
+ }
+ ControlFlow::Continue(kind.into())
+ }
+}
+
+impl Foldable for Const {
+ fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ folder.fold_const(self)
+ }
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut this = self.clone();
+ match &mut this.literal {
+ super::ty::ConstantKind::Allocated(alloc) => *alloc = alloc.fold(folder)?,
+ super::ty::ConstantKind::Unevaluated(uv) => *uv = uv.fold(folder)?,
+ super::ty::ConstantKind::Param(_) => {}
+ }
+ this.ty = this.ty.fold(folder)?;
+ ControlFlow::Continue(this)
+ }
+}
+
+impl Foldable for Opaque {
+ fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(self.clone())
+ }
+}
+
+impl Foldable for Allocation {
+ fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(self.clone())
+ }
+}
+
+impl Foldable for UnevaluatedConst {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let UnevaluatedConst { def, args, promoted } = self;
+ ControlFlow::Continue(UnevaluatedConst {
+ def: def.fold(folder)?,
+ args: args.fold(folder)?,
+ promoted: promoted.fold(folder)?,
+ })
+ }
+}
+
+impl Foldable for ConstDef {
+ fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(*self)
+ }
+}
+
+impl<T: Foldable> Foldable for Option<T> {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(match self {
+ Some(val) => Some(val.fold(folder)?),
+ None => None,
+ })
+ }
+}
+
+impl Foldable for Promoted {
+ fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(*self)
+ }
+}
+
+impl Foldable for GenericArgs {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(GenericArgs(self.0.fold(folder)?))
+ }
+}
+
+impl Foldable for Region {
+ fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ folder.fold_reg(self)
+ }
+ fn super_fold<V: Folder>(&self, _: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(self.clone())
+ }
+}
+
+impl Foldable for GenericArgKind {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut this = self.clone();
+ match &mut this {
+ GenericArgKind::Lifetime(lt) => *lt = lt.fold(folder)?,
+ GenericArgKind::Type(t) => *t = t.fold(folder)?,
+ GenericArgKind::Const(c) => *c = c.fold(folder)?,
+ }
+ ControlFlow::Continue(this)
+ }
+}
+
+impl Foldable for RigidTy {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut this = self.clone();
+ match &mut this {
+ RigidTy::Bool
+ | RigidTy::Char
+ | RigidTy::Int(_)
+ | RigidTy::Uint(_)
+ | RigidTy::Float(_)
+ | RigidTy::Never
+ | RigidTy::Foreign(_)
+ | RigidTy::Str => {}
+ RigidTy::Array(t, c) => {
+ *t = t.fold(folder)?;
+ *c = c.fold(folder)?;
+ }
+ RigidTy::Slice(inner) => *inner = inner.fold(folder)?,
+ RigidTy::RawPtr(ty, _) => *ty = ty.fold(folder)?,
+ RigidTy::Ref(reg, ty, _) => {
+ *reg = reg.fold(folder)?;
+ *ty = ty.fold(folder)?
+ }
+ RigidTy::FnDef(_, args) => *args = args.fold(folder)?,
+ RigidTy::FnPtr(sig) => *sig = sig.fold(folder)?,
+ RigidTy::Closure(_, args) => *args = args.fold(folder)?,
+ RigidTy::Generator(_, args, _) => *args = args.fold(folder)?,
+ RigidTy::Dynamic(pred, r, _) => {
+ *pred = pred.fold(folder)?;
+ *r = r.fold(folder)?;
+ }
+ RigidTy::Tuple(fields) => *fields = fields.fold(folder)?,
+ RigidTy::Adt(_, args) => *args = args.fold(folder)?,
+ }
+ ControlFlow::Continue(this)
+ }
+}
+
+impl<T: Foldable> Foldable for Vec<T> {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut this = self.clone();
+ for arg in &mut this {
+ *arg = arg.fold(folder)?;
+ }
+ ControlFlow::Continue(this)
+ }
+}
+
+impl<T: Foldable> Foldable for Binder<T> {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(Self {
+ value: self.value.fold(folder)?,
+ bound_vars: self.bound_vars.clone(),
+ })
+ }
+}
+
+impl Foldable for ExistentialPredicate {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ let mut this = self.clone();
+ match &mut this {
+ ExistentialPredicate::Trait(tr) => tr.generic_args = tr.generic_args.fold(folder)?,
+ ExistentialPredicate::Projection(p) => {
+ p.term = p.term.fold(folder)?;
+ p.generic_args = p.generic_args.fold(folder)?;
+ }
+ ExistentialPredicate::AutoTrait(_) => {}
+ }
+ ControlFlow::Continue(this)
+ }
+}
+
+impl Foldable for TermKind {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(match self {
+ TermKind::Type(t) => TermKind::Type(t.fold(folder)?),
+ TermKind::Const(c) => TermKind::Const(c.fold(folder)?),
+ })
+ }
+}
+
+impl Foldable for FnSig {
+ fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+ ControlFlow::Continue(Self {
+ inputs_and_output: self.inputs_and_output.fold(folder)?,
+ c_variadic: self.c_variadic,
+ unsafety: self.unsafety,
+ abi: self.abi.clone(),
+ })
+ }
+}
+
+pub enum Never {}
+
+/// In order to instantiate a `Foldable`'s generic parameters with specific arguments,
+/// `GenericArgs` can be used as a `Folder` that replaces all mentions of generic params
+/// with the entries in its list.
+impl Folder for GenericArgs {
+ type Break = Never;
+
+ fn fold_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
+ ControlFlow::Continue(match ty.kind() {
+ TyKind::Param(p) => self[p],
+ _ => *ty,
+ })
+ }
+
+ fn fold_const(&mut self, c: &Const) -> ControlFlow<Self::Break, Const> {
+ ControlFlow::Continue(match &c.literal {
+ ConstantKind::Param(p) => self[p.clone()].clone(),
+ _ => c.clone(),
+ })
+ }
+}
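
A standalone toy (hypothetical types, not this crate's API) of the idea behind `impl Folder for GenericArgs`: walk a type and replace parameters by index, leaving everything else untouched.

    // Hypothetical miniature type language standing in for `Ty`/`TyKind`.
    #[derive(Clone, Debug, PartialEq)]
    enum ToyTy {
        Param(usize),
        Ref(Box<ToyTy>),
        Bool,
    }

    // The "args as folder" idea: parameters are looked up in the argument list,
    // everything else is folded structurally.
    fn substitute(ty: &ToyTy, args: &[ToyTy]) -> ToyTy {
        match ty {
            ToyTy::Param(i) => args[*i].clone(),
            ToyTy::Ref(inner) => ToyTy::Ref(Box::new(substitute(inner, args))),
            ToyTy::Bool => ToyTy::Bool,
        }
    }

    fn main() {
        let ty = ToyTy::Ref(Box::new(ToyTy::Param(0)));
        assert_eq!(
            substitute(&ty, &[ToyTy::Bool]),
            ToyTy::Ref(Box::new(ToyTy::Bool))
        );
    }
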
diff --git a/compiler/rustc_smir/src/stable_mir/mod.rs b/compiler/stable_mir/src/lib.rs
index 19061742b..104985493 100644
--- a/compiler/rustc_smir/src/stable_mir/mod.rs
+++ b/compiler/stable_mir/src/lib.rs
@@ -1,24 +1,37 @@
-//! Module that implements the public interface to the Stable MIR.
+//! The WIP stable interface to rustc internals.
//!
-//! This module shall contain all type definitions and APIs that we expect 3P tools to invoke to
-//! interact with the compiler.
+//! For more information see <https://github.com/rust-lang/project-stable-mir>
//!
-//! The goal is to eventually move this module to its own crate which shall be published on
-//! [crates.io](https://crates.io).
+//! # Note
+//!
+//! This API is still completely unstable and subject to change.
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ test(attr(allow(unused_variables), deny(warnings)))
+)]
//!
-//! ## Note:
+//! This crate shall contain all type definitions and APIs that we expect third-party tools to invoke to
+//! interact with the compiler.
//!
-//! There shouldn't be any direct references to internal compiler constructs in this module.
-//! If you need an internal construct, consider using `rustc_internal` or `rustc_smir`.
+//! The goal is to eventually be published on
+//! [crates.io](https://crates.io).
use std::cell::Cell;
+use std::fmt;
+use std::fmt::Debug;
-use crate::rustc_smir::Tables;
+use self::ty::{
+ GenericPredicates, Generics, ImplDef, ImplTrait, Span, TraitDecl, TraitDef, Ty, TyKind,
+};
-use self::ty::{ImplDef, ImplTrait, TraitDecl, TraitDef, Ty, TyKind};
+#[macro_use]
+extern crate scoped_tls;
+pub mod fold;
pub mod mir;
pub mod ty;
+pub mod visitor;
/// Use String for now but we should replace it.
pub type Symbol = String;
@@ -27,7 +40,21 @@ pub type Symbol = String;
pub type CrateNum = usize;
/// A unique identification number for each item accessible for the current compilation unit.
-pub type DefId = usize;
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct DefId(pub usize);
+
+impl Debug for DefId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DefId")
+ .field("id", &self.0)
+ .field("name", &with(|cx| cx.name_of_def_id(*self)))
+ .finish()
+ }
+}
+
+/// A unique identification number for each provenance
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct AllocId(pub usize);
/// A list of crate items.
pub type CrateItems = Vec<CrateItem>;
@@ -38,23 +65,51 @@ pub type TraitDecls = Vec<TraitDef>;
/// A list of impl trait decls.
pub type ImplTraitDecls = Vec<ImplDef>;
+/// An error type used to represent an error that has already been reported by the compiler.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum CompilerError<T> {
+ /// Internal compiler error (i.e., the compiler crashed).
+ ICE,
+ /// Compilation failed.
+ CompilationFailed,
+ /// Compilation was interrupted.
+ Interrupted(T),
+ /// Compilation skipped. This happens when users invoke rustc to retrieve information such as
+ /// --version.
+ Skipped,
+}
+
/// Holds information about a crate.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Crate {
- pub(crate) id: CrateNum,
+ pub id: CrateNum,
pub name: Symbol,
pub is_local: bool,
}
+pub type DefKind = Opaque;
+
/// Holds information about an item in the crate.
/// For now, it only stores the item DefId. Use functions inside `rustc_internal` module to
/// use this item.
#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct CrateItem(pub(crate) DefId);
+pub struct CrateItem(pub DefId);
impl CrateItem {
pub fn body(&self) -> mir::Body {
- with(|cx| cx.mir_body(self))
+ with(|cx| cx.mir_body(self.0))
+ }
+
+ pub fn span(&self) -> Span {
+ with(|cx| cx.span_of_an_item(self.0))
+ }
+
+ pub fn name(&self) -> String {
+ with(|cx| cx.name_of_def_id(self.0))
+ }
+
+ pub fn kind(&self) -> DefKind {
+ with(|cx| cx.def_kind(self.0))
}
}
@@ -105,11 +160,14 @@ pub trait Context {
fn entry_fn(&mut self) -> Option<CrateItem>;
/// Retrieve all items of the local crate that have a MIR associated with them.
fn all_local_items(&mut self) -> CrateItems;
- fn mir_body(&mut self, item: &CrateItem) -> mir::Body;
+ fn mir_body(&mut self, item: DefId) -> mir::Body;
fn all_trait_decls(&mut self) -> TraitDecls;
fn trait_decl(&mut self, trait_def: &TraitDef) -> TraitDecl;
fn all_trait_impls(&mut self) -> ImplTraitDecls;
fn trait_impl(&mut self, trait_impl: &ImplDef) -> ImplTrait;
+ fn generics_of(&mut self, def_id: DefId) -> Generics;
+ fn predicates_of(&mut self, def_id: DefId) -> GenericPredicates;
+ fn explicit_predicates_of(&mut self, def_id: DefId) -> GenericPredicates;
/// Get information about the local crate.
fn local_crate(&self) -> Crate;
/// Retrieve a list of all external crates.
@@ -118,12 +176,23 @@ pub trait Context {
/// Find a crate with the given name.
fn find_crate(&self, name: &str) -> Option<Crate>;
+ /// Returns the name of the given `DefId`.
+ fn name_of_def_id(&self, def_id: DefId) -> String;
+
+ /// Returns a human-readable form of the given `Span`.
+ fn print_span(&self, span: Span) -> String;
+
+ /// Returns the kind of the given `DefId`.
+ fn def_kind(&mut self, def_id: DefId) -> DefKind;
+
+ /// `Span` of an item
+ fn span_of_an_item(&mut self, def_id: DefId) -> Span;
+
/// Obtain the representation of a type.
fn ty_kind(&mut self, ty: Ty) -> TyKind;
- /// HACK: Until we have fully stable consumers, we need an escape hatch
- /// to get `DefId`s out of `CrateItem`s.
- fn rustc_tables(&mut self, f: &mut dyn FnMut(&mut Tables<'_>));
+ /// Create a new `Ty` from scratch without information from rustc.
+ fn mk_ty(&mut self, kind: TyKind) -> Ty;
}
// A thread local variable that stores a pointer to the tables mapping between TyCtxt
@@ -143,7 +212,7 @@ pub fn run(mut context: impl Context, f: impl FnOnce()) {
/// Loads the current context and calls a function with it.
/// Do not nest these, as that will ICE.
-pub(crate) fn with<R>(f: impl FnOnce(&mut dyn Context) -> R) -> R {
+pub fn with<R>(f: impl FnOnce(&mut dyn Context) -> R) -> R {
assert!(TLV.is_set());
TLV.with(|tlv| {
let ptr = tlv.get();
@@ -151,3 +220,23 @@ pub(crate) fn with<R>(f: impl FnOnce(&mut dyn Context) -> R) -> R {
f(unsafe { *(ptr as *mut &mut dyn Context) })
})
}
+
+/// A type that hides internal information but can still be used for debugging purposes.
+#[derive(Clone)]
+pub struct Opaque(String);
+
+impl std::fmt::Display for Opaque {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.0)
+ }
+}
+
+impl std::fmt::Debug for Opaque {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:?}", self.0)
+ }
+}
+
+pub fn opaque<T: Debug>(value: &T) -> Opaque {
+ Opaque(format!("{value:?}"))
+}
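
The `run`/`with` pair above relies on the `scoped-tls` dependency added in the new Cargo.toml; a standalone sketch of that pattern, with a plain `String` standing in for `dyn Context`:

    // Scoped thread-local holding the "current context" for the duration of a closure.
    scoped_tls::scoped_thread_local!(static CONTEXT: String);

    fn with_context<R>(f: impl FnOnce(&String) -> R) -> R {
        assert!(CONTEXT.is_set());
        CONTEXT.with(f)
    }

    fn run(context: &String, f: impl FnOnce()) {
        CONTEXT.set(context, f);
    }

    fn main() {
        let ctx = String::from("local crate: demo");
        run(&ctx, || {
            let name = with_context(|c| c.clone());
            println!("{name}");
        });
    }
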
diff --git a/compiler/rustc_smir/src/stable_mir/mir.rs b/compiler/stable_mir/src/mir.rs
index a9dbc3463..a9dbc3463 100644
--- a/compiler/rustc_smir/src/stable_mir/mir.rs
+++ b/compiler/stable_mir/src/mir.rs
diff --git a/compiler/rustc_smir/src/stable_mir/mir/body.rs b/compiler/stable_mir/src/mir/body.rs
index c16bd6cbd..f93a1a3a9 100644
--- a/compiler/rustc_smir/src/stable_mir/mir/body.rs
+++ b/compiler/stable_mir/src/mir/body.rs
@@ -1,8 +1,6 @@
-use crate::rustc_internal::Opaque;
-use crate::stable_mir::ty::{
- AdtDef, ClosureDef, Const, GeneratorDef, GenericArgs, Movability, Region,
-};
-use crate::stable_mir::{self, ty::Ty};
+use crate::ty::{AdtDef, ClosureDef, Const, GeneratorDef, GenericArgs, Movability, Region};
+use crate::Opaque;
+use crate::{ty::Ty, Span};
#[derive(Clone, Debug)]
pub struct Body {
@@ -135,9 +133,10 @@ pub enum AsyncGeneratorKind {
}
pub(crate) type LocalDefId = Opaque;
-pub(crate) type CounterValueReference = Opaque;
-pub(crate) type InjectedExpressionId = Opaque;
-pub(crate) type ExpressionOperandId = Opaque;
+/// The rustc coverage data structures are heavily tied to internal details of the
+/// coverage implementation that are likely to change, and are unlikely to be
+/// useful to third-party tools for the foreseeable future.
+pub(crate) type Coverage = Opaque;
/// The FakeReadCause describes the type of pattern why a FakeRead statement exists.
#[derive(Clone, Debug)]
@@ -167,42 +166,6 @@ pub enum Variance {
}
#[derive(Clone, Debug)]
-pub enum Op {
- Subtract,
- Add,
-}
-
-#[derive(Clone, Debug)]
-pub enum CoverageKind {
- Counter {
- function_source_hash: usize,
- id: CounterValueReference,
- },
- Expression {
- id: InjectedExpressionId,
- lhs: ExpressionOperandId,
- op: Op,
- rhs: ExpressionOperandId,
- },
- Unreachable,
-}
-
-#[derive(Clone, Debug)]
-pub struct CodeRegion {
- pub file_name: String,
- pub start_line: usize,
- pub start_col: usize,
- pub end_line: usize,
- pub end_col: usize,
-}
-
-#[derive(Clone, Debug)]
-pub struct Coverage {
- pub kind: CoverageKind,
- pub code_region: Option<CodeRegion>,
-}
-
-#[derive(Clone, Debug)]
pub struct CopyNonOverlapping {
pub src: Operand,
pub dst: Operand,
@@ -250,7 +213,7 @@ pub enum Rvalue {
/// generator lowering, `Generator` aggregate kinds are disallowed too.
Aggregate(AggregateKind, Vec<Operand>),
- /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
+ /// * `Offset` has the same semantics as `<*const T>::offset`, except that the second
/// parameter may be a `usize` as well.
/// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
/// raw pointers, or function pointers and return a `bool`. The types of the operands must be
@@ -280,16 +243,14 @@ pub enum Rvalue {
/// deref operation, immediately followed by one or more projections.
CopyForDeref(Place),
- /// Computes the discriminant of the place, returning it as an integer of type
- /// [`discriminant_ty`]. Returns zero for types without discriminant.
+ /// Computes the discriminant of the place, returning it as an integer.
+ /// Returns zero for types without discriminant.
///
/// The validity requirements for the underlying value are undecided for this rvalue, see
/// [#91095]. Note too that the value of the discriminant is not the same thing as the
- /// variant index; use [`discriminant_for_variant`] to convert.
+ /// variant index;
///
- /// [`discriminant_ty`]: rustc_middle::ty::Ty::discriminant_ty
/// [#91095]: https://github.com/rust-lang/rust/issues/91095
- /// [`discriminant_for_variant`]: rustc_middle::ty::Ty::discriminant_for_variant
Discriminant(Place),
/// Yields the length of the place, as a `usize`.
@@ -330,7 +291,7 @@ pub enum Rvalue {
///
/// **Needs clarification**: Are there weird additional semantics here related to the runtime
/// nature of this operation?
- ThreadLocalRef(stable_mir::CrateItem),
+ ThreadLocalRef(crate::CrateItem),
/// Computes a value as described by the operation.
NullaryOp(NullOp, Ty),
@@ -359,7 +320,7 @@ pub enum AggregateKind {
pub enum Operand {
Copy(Place),
Move(Place),
- Constant(String),
+ Constant(Constant),
}
#[derive(Clone, Debug)]
@@ -384,6 +345,13 @@ pub type VariantIdx = usize;
type UserTypeAnnotationIndex = usize;
#[derive(Clone, Debug)]
+pub struct Constant {
+ pub span: Span,
+ pub user_ty: Option<UserTypeAnnotationIndex>,
+ pub literal: Const,
+}
+
+#[derive(Clone, Debug)]
pub struct SwitchTarget {
pub value: u128,
pub target: usize,
@@ -395,9 +363,10 @@ pub enum BorrowKind {
Shared,
/// The immediately borrowed place must be immutable, but projections from
- /// it don't need to be. For example, a shallow borrow of `a.b` doesn't
+ /// it don't need to be. This is used to prevent match guards from replacing
+ /// the scrutinee. For example, a fake borrow of `a.b` doesn't
/// conflict with a mutable borrow of `a.b.c`.
- Shallow,
+ Fake,
/// Data is mutable and not aliasable.
Mut {
@@ -419,7 +388,7 @@ pub enum Mutability {
Mut,
}
-#[derive(Clone, Debug)]
+#[derive(Copy, Clone, Debug)]
pub enum Safety {
Unsafe,
Normal,
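
For context on the `Shallow` → `Fake` rename: fake borrows exist so that a match guard cannot change the scrutinee while the match is being evaluated. A small example of the code shape that motivates them:

    fn main() {
        let slot = Some(3);
        match slot {
            // MIR takes a fake borrow of `slot` for this guarded arm; a guard that
            // tried to overwrite `slot` would conflict with that fake borrow and be
            // rejected by borrowck, so the tested discriminant cannot change.
            Some(n) if n > 0 => println!("positive: {n}"),
            _ => println!("non-positive or empty"),
        }
    }
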
diff --git a/compiler/stable_mir/src/ty.rs b/compiler/stable_mir/src/ty.rs
new file mode 100644
index 000000000..6029e3c11
--- /dev/null
+++ b/compiler/stable_mir/src/ty.rs
@@ -0,0 +1,567 @@
+use super::{
+ mir::Safety,
+ mir::{Body, Mutability},
+ with, AllocId, DefId, Symbol,
+};
+use crate::Opaque;
+use std::fmt::{self, Debug, Formatter};
+
+#[derive(Copy, Clone)]
+pub struct Ty(pub usize);
+
+impl Debug for Ty {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Ty").field("id", &self.0).field("kind", &self.kind()).finish()
+ }
+}
+
+impl Ty {
+ pub fn kind(&self) -> TyKind {
+ with(|context| context.ty_kind(*self))
+ }
+}
+
+impl From<TyKind> for Ty {
+ fn from(value: TyKind) -> Self {
+ with(|context| context.mk_ty(value))
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Const {
+ pub literal: ConstantKind,
+ pub ty: Ty,
+}
+
+type Ident = Opaque;
+
+#[derive(Debug, Clone)]
+pub struct Region {
+ pub kind: RegionKind,
+}
+
+#[derive(Debug, Clone)]
+pub enum RegionKind {
+ ReEarlyBound(EarlyBoundRegion),
+ ReLateBound(DebruijnIndex, BoundRegion),
+ ReStatic,
+ RePlaceholder(Placeholder<BoundRegion>),
+ ReErased,
+}
+
+pub(crate) type DebruijnIndex = u32;
+
+#[derive(Debug, Clone)]
+pub struct EarlyBoundRegion {
+ pub def_id: RegionDef,
+ pub index: u32,
+ pub name: Symbol,
+}
+
+pub(crate) type BoundVar = u32;
+
+#[derive(Debug, Clone)]
+pub struct BoundRegion {
+ pub var: BoundVar,
+ pub kind: BoundRegionKind,
+}
+
+pub(crate) type UniverseIndex = u32;
+
+#[derive(Debug, Clone)]
+pub struct Placeholder<T> {
+ pub universe: UniverseIndex,
+ pub bound: T,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Span(pub usize);
+
+impl Debug for Span {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Span")
+ .field("id", &self.0)
+ .field("repr", &with(|cx| cx.print_span(*self)))
+ .finish()
+ }
+}
+
+#[derive(Clone, Debug)]
+pub enum TyKind {
+ RigidTy(RigidTy),
+ Alias(AliasKind, AliasTy),
+ Param(ParamTy),
+ Bound(usize, BoundTy),
+}
+
+#[derive(Clone, Debug)]
+pub enum RigidTy {
+ Bool,
+ Char,
+ Int(IntTy),
+ Uint(UintTy),
+ Float(FloatTy),
+ Adt(AdtDef, GenericArgs),
+ Foreign(ForeignDef),
+ Str,
+ Array(Ty, Const),
+ Slice(Ty),
+ RawPtr(Ty, Mutability),
+ Ref(Region, Ty, Mutability),
+ FnDef(FnDef, GenericArgs),
+ FnPtr(PolyFnSig),
+ Closure(ClosureDef, GenericArgs),
+ Generator(GeneratorDef, GenericArgs, Movability),
+ Dynamic(Vec<Binder<ExistentialPredicate>>, Region, DynKind),
+ Never,
+ Tuple(Vec<Ty>),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum IntTy {
+ Isize,
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum UintTy {
+ Usize,
+ U8,
+ U16,
+ U32,
+ U64,
+ U128,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum FloatTy {
+ F32,
+ F64,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Movability {
+ Static,
+ Movable,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct ForeignDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct FnDef(pub DefId);
+
+impl FnDef {
+ pub fn body(&self) -> Body {
+ with(|ctx| ctx.mir_body(self.0))
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct ClosureDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct GeneratorDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct ParamDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct BrNamedDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct AdtDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct AliasDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct TraitDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct GenericDef(pub DefId);
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct ConstDef(pub DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct ImplDef(pub DefId);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct RegionDef(pub DefId);
+
+#[derive(Clone, Debug)]
+pub struct GenericArgs(pub Vec<GenericArgKind>);
+
+impl std::ops::Index<ParamTy> for GenericArgs {
+ type Output = Ty;
+
+ fn index(&self, index: ParamTy) -> &Self::Output {
+ self.0[index.index as usize].expect_ty()
+ }
+}
+
+impl std::ops::Index<ParamConst> for GenericArgs {
+ type Output = Const;
+
+ fn index(&self, index: ParamConst) -> &Self::Output {
+ self.0[index.index as usize].expect_const()
+ }
+}
+
+#[derive(Clone, Debug)]
+pub enum GenericArgKind {
+ Lifetime(Region),
+ Type(Ty),
+ Const(Const),
+}
+
+impl GenericArgKind {
+ /// Panic if this generic argument is not a type, otherwise
+ /// return the type.
+ #[track_caller]
+ pub fn expect_ty(&self) -> &Ty {
+ match self {
+ GenericArgKind::Type(ty) => ty,
+ _ => panic!("{self:?}"),
+ }
+ }
+
+ /// Panic if this generic argument is not a const, otherwise
+ /// return the const.
+ #[track_caller]
+ pub fn expect_const(&self) -> &Const {
+ match self {
+ GenericArgKind::Const(c) => c,
+ _ => panic!("{self:?}"),
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub enum TermKind {
+ Type(Ty),
+ Const(Const),
+}
+
+#[derive(Clone, Debug)]
+pub enum AliasKind {
+ Projection,
+ Inherent,
+ Opaque,
+ Weak,
+}
+
+#[derive(Clone, Debug)]
+pub struct AliasTy {
+ pub def_id: AliasDef,
+ pub args: GenericArgs,
+}
+
+pub type PolyFnSig = Binder<FnSig>;
+
+#[derive(Clone, Debug)]
+pub struct FnSig {
+ pub inputs_and_output: Vec<Ty>,
+ pub c_variadic: bool,
+ pub unsafety: Safety,
+ pub abi: Abi,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum Abi {
+ Rust,
+ C { unwind: bool },
+ Cdecl { unwind: bool },
+ Stdcall { unwind: bool },
+ Fastcall { unwind: bool },
+ Vectorcall { unwind: bool },
+ Thiscall { unwind: bool },
+ Aapcs { unwind: bool },
+ Win64 { unwind: bool },
+ SysV64 { unwind: bool },
+ PtxKernel,
+ Msp430Interrupt,
+ X86Interrupt,
+ AmdGpuKernel,
+ EfiApi,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+ CCmseNonSecureCall,
+ Wasm,
+ System { unwind: bool },
+ RustIntrinsic,
+ RustCall,
+ PlatformIntrinsic,
+ Unadjusted,
+ RustCold,
+ RiscvInterruptM,
+ RiscvInterruptS,
+}
+
+#[derive(Clone, Debug)]
+pub struct Binder<T> {
+ pub value: T,
+ pub bound_vars: Vec<BoundVariableKind>,
+}
+
+#[derive(Clone, Debug)]
+pub struct EarlyBinder<T> {
+ pub value: T,
+}
+
+#[derive(Clone, Debug)]
+pub enum BoundVariableKind {
+ Ty(BoundTyKind),
+ Region(BoundRegionKind),
+ Const,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum BoundTyKind {
+ Anon,
+ Param(ParamDef, String),
+}
+
+#[derive(Clone, Debug)]
+pub enum BoundRegionKind {
+ BrAnon,
+ BrNamed(BrNamedDef, String),
+ BrEnv,
+}
+
+#[derive(Clone, Debug)]
+pub enum DynKind {
+ Dyn,
+ DynStar,
+}
+
+#[derive(Clone, Debug)]
+pub enum ExistentialPredicate {
+ Trait(ExistentialTraitRef),
+ Projection(ExistentialProjection),
+ AutoTrait(TraitDef),
+}
+
+#[derive(Clone, Debug)]
+pub struct ExistentialTraitRef {
+ pub def_id: TraitDef,
+ pub generic_args: GenericArgs,
+}
+
+#[derive(Clone, Debug)]
+pub struct ExistentialProjection {
+ pub def_id: TraitDef,
+ pub generic_args: GenericArgs,
+ pub term: TermKind,
+}
+
+#[derive(Clone, Debug)]
+pub struct ParamTy {
+ pub index: u32,
+ pub name: String,
+}
+
+#[derive(Clone, Debug)]
+pub struct BoundTy {
+ pub var: usize,
+ pub kind: BoundTyKind,
+}
+
+pub type Bytes = Vec<Option<u8>>;
+pub type Size = usize;
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Prov(pub AllocId);
+pub type Align = u64;
+pub type Promoted = u32;
+pub type InitMaskMaterialized = Vec<u64>;
+
+/// Stores the provenance information of pointers stored in memory.
+#[derive(Clone, Debug)]
+pub struct ProvenanceMap {
+ /// Provenance in this map applies from the given offset for an entire pointer-size worth of
+ /// bytes. Two entries in this map are always at least a pointer size apart.
+ pub ptrs: Vec<(Size, Prov)>,
+}
+
+#[derive(Clone, Debug)]
+pub struct Allocation {
+ pub bytes: Bytes,
+ pub provenance: ProvenanceMap,
+ pub align: Align,
+ pub mutability: Mutability,
+}
+
+#[derive(Clone, Debug)]
+pub enum ConstantKind {
+ Allocated(Allocation),
+ Unevaluated(UnevaluatedConst),
+ Param(ParamConst),
+}
+
+#[derive(Clone, Debug)]
+pub struct ParamConst {
+ pub index: u32,
+ pub name: String,
+}
+
+#[derive(Clone, Debug)]
+pub struct UnevaluatedConst {
+ pub def: ConstDef,
+ pub args: GenericArgs,
+ pub promoted: Option<Promoted>,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum TraitSpecializationKind {
+ None,
+ Marker,
+ AlwaysApplicable,
+}
+
+#[derive(Clone, Debug)]
+pub struct TraitDecl {
+ pub def_id: TraitDef,
+ pub unsafety: Safety,
+ pub paren_sugar: bool,
+ pub has_auto_impl: bool,
+ pub is_marker: bool,
+ pub is_coinductive: bool,
+ pub skip_array_during_method_dispatch: bool,
+ pub specialization_kind: TraitSpecializationKind,
+ pub must_implement_one_of: Option<Vec<Ident>>,
+ pub implement_via_object: bool,
+ pub deny_explicit_impl: bool,
+}
+
+impl TraitDecl {
+ pub fn generics_of(&self) -> Generics {
+ with(|cx| cx.generics_of(self.def_id.0))
+ }
+
+ pub fn predicates_of(&self) -> GenericPredicates {
+ with(|cx| cx.predicates_of(self.def_id.0))
+ }
+
+ pub fn explicit_predicates_of(&self) -> GenericPredicates {
+ with(|cx| cx.explicit_predicates_of(self.def_id.0))
+ }
+}
+
+pub type ImplTrait = EarlyBinder<TraitRef>;
+
+#[derive(Clone, Debug)]
+pub struct TraitRef {
+ pub def_id: TraitDef,
+ pub args: GenericArgs,
+}
+
+#[derive(Clone, Debug)]
+pub struct Generics {
+ pub parent: Option<GenericDef>,
+ pub parent_count: usize,
+ pub params: Vec<GenericParamDef>,
+ pub param_def_id_to_index: Vec<(GenericDef, u32)>,
+ pub has_self: bool,
+ pub has_late_bound_regions: Option<Span>,
+ pub host_effect_index: Option<usize>,
+}
+
+#[derive(Clone, Debug)]
+pub enum GenericParamDefKind {
+ Lifetime,
+ Type { has_default: bool, synthetic: bool },
+ Const { has_default: bool },
+}
+
+#[derive(Clone, Debug)]
+pub struct GenericParamDef {
+ pub name: super::Symbol,
+ pub def_id: GenericDef,
+ pub index: u32,
+ pub pure_wrt_drop: bool,
+ pub kind: GenericParamDefKind,
+}
+
+pub struct GenericPredicates {
+ pub parent: Option<TraitDef>,
+ pub predicates: Vec<(PredicateKind, Span)>,
+}
+
+#[derive(Clone, Debug)]
+pub enum PredicateKind {
+ Clause(ClauseKind),
+ ObjectSafe(TraitDef),
+ ClosureKind(ClosureDef, GenericArgs, ClosureKind),
+ SubType(SubtypePredicate),
+ Coerce(CoercePredicate),
+ ConstEquate(Const, Const),
+ Ambiguous,
+ AliasRelate(TermKind, TermKind, AliasRelationDirection),
+}
+
+#[derive(Clone, Debug)]
+pub enum ClauseKind {
+ Trait(TraitPredicate),
+ RegionOutlives(RegionOutlivesPredicate),
+ TypeOutlives(TypeOutlivesPredicate),
+ Projection(ProjectionPredicate),
+ ConstArgHasType(Const, Ty),
+ WellFormed(GenericArgKind),
+ ConstEvaluatable(Const),
+}
+
+#[derive(Clone, Debug)]
+pub enum ClosureKind {
+ Fn,
+ FnMut,
+ FnOnce,
+}
+
+#[derive(Clone, Debug)]
+pub struct SubtypePredicate {
+ pub a: Ty,
+ pub b: Ty,
+}
+
+#[derive(Clone, Debug)]
+pub struct CoercePredicate {
+ pub a: Ty,
+ pub b: Ty,
+}
+
+#[derive(Clone, Debug)]
+pub enum AliasRelationDirection {
+ Equate,
+ Subtype,
+}
+
+#[derive(Clone, Debug)]
+pub struct TraitPredicate {
+ pub trait_ref: TraitRef,
+ pub polarity: ImplPolarity,
+}
+
+#[derive(Clone, Debug)]
+pub struct OutlivesPredicate<A, B>(pub A, pub B);
+
+pub type RegionOutlivesPredicate = OutlivesPredicate<Region, Region>;
+pub type TypeOutlivesPredicate = OutlivesPredicate<Ty, Region>;
+
+#[derive(Clone, Debug)]
+pub struct ProjectionPredicate {
+ pub projection_ty: AliasTy,
+ pub term: TermKind,
+}
+
+#[derive(Clone, Debug)]
+pub enum ImplPolarity {
+ Positive,
+ Negative,
+ Reservation,
+}
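
A standalone toy (hypothetical names) of the `Index` impls defined above for `GenericArgs`: arguments are looked up positionally, and the access panics, like `expect_ty`, on a kind mismatch.

    #[derive(Clone, Debug, PartialEq)]
    enum ToyArg {
        Type(&'static str),
        Const(u64),
    }

    struct ToyArgs(Vec<ToyArg>);

    impl ToyArgs {
        // Mirrors `GenericArgs`' `Index<ParamTy>` plus `expect_ty`: positional
        // lookup that panics on a kind mismatch.
        #[track_caller]
        fn type_at(&self, index: usize) -> &'static str {
            match &self.0[index] {
                ToyArg::Type(name) => *name,
                other => panic!("expected a type argument, found {other:?}"),
            }
        }
    }

    fn main() {
        let args = ToyArgs(vec![ToyArg::Type("u32"), ToyArg::Const(3)]);
        assert_eq!(args.type_at(0), "u32");
    }
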
diff --git a/compiler/stable_mir/src/visitor.rs b/compiler/stable_mir/src/visitor.rs
new file mode 100644
index 000000000..961009581
--- /dev/null
+++ b/compiler/stable_mir/src/visitor.rs
@@ -0,0 +1,203 @@
+use std::ops::ControlFlow;
+
+use crate::Opaque;
+
+use super::ty::{
+    Allocation, Binder, Const, ConstDef, ExistentialPredicate, FnSig, GenericArgKind, GenericArgs,
+    Promoted, Region, RigidTy, TermKind, Ty, UnevaluatedConst,
+};
+
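+/// A visitor over the types exposed by stable_mir.
+///
+/// Every `visit_*` hook defaults to walking the value's children via
+/// `super_visit`; an override can return `ControlFlow::Break` to stop the
+/// traversal early.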
+pub trait Visitor: Sized {
+    type Break;
+    fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break> {
+        ty.super_visit(self)
+    }
+    fn visit_const(&mut self, c: &Const) -> ControlFlow<Self::Break> {
+        c.super_visit(self)
+    }
+    fn visit_reg(&mut self, reg: &Region) -> ControlFlow<Self::Break> {
+        reg.super_visit(self)
+    }
+}
+
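+/// A value that can be traversed by a [`Visitor`].
+///
+/// `visit` hands the value to the matching visitor hook (or falls back to
+/// `super_visit`), while `super_visit` only recurses into the value's
+/// children.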
+pub trait Visitable {
+    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.super_visit(visitor)
+    }
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break>;
+}
+
+impl Visitable for Ty {
+    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        visitor.visit_ty(self)
+    }
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self.kind() {
+            super::ty::TyKind::RigidTy(ty) => ty.visit(visitor)?,
+            super::ty::TyKind::Alias(_, alias) => alias.args.visit(visitor)?,
+            super::ty::TyKind::Param(_) => {}
+            super::ty::TyKind::Bound(_, _) => {}
+        }
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for Const {
+    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        visitor.visit_const(self)
+    }
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match &self.literal {
+            super::ty::ConstantKind::Allocated(alloc) => alloc.visit(visitor)?,
+            super::ty::ConstantKind::Unevaluated(uv) => uv.visit(visitor)?,
+            super::ty::ConstantKind::Param(_) => {}
+        }
+        self.ty.visit(visitor)
+    }
+}
+
+impl Visitable for Opaque {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for Allocation {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for UnevaluatedConst {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        let UnevaluatedConst { def, args, promoted } = self;
+        def.visit(visitor)?;
+        args.visit(visitor)?;
+        promoted.visit(visitor)
+    }
+}
+
+impl Visitable for ConstDef {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl<T: Visitable> Visitable for Option<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            Some(val) => val.visit(visitor),
+            None => ControlFlow::Continue(()),
+        }
+    }
+}
+
+impl Visitable for Promoted {
+    fn super_visit<V: Visitor>(&self, _visitor: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for GenericArgs {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.0.visit(visitor)
+    }
+}
+
+impl Visitable for Region {
+    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        visitor.visit_reg(self)
+    }
+
+    fn super_visit<V: Visitor>(&self, _: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
+impl Visitable for GenericArgKind {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            GenericArgKind::Lifetime(lt) => lt.visit(visitor),
+            GenericArgKind::Type(t) => t.visit(visitor),
+            GenericArgKind::Const(c) => c.visit(visitor),
+        }
+    }
+}
+
+impl Visitable for RigidTy {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            RigidTy::Bool
+            | RigidTy::Char
+            | RigidTy::Int(_)
+            | RigidTy::Uint(_)
+            | RigidTy::Float(_)
+            | RigidTy::Never
+            | RigidTy::Foreign(_)
+            | RigidTy::Str => ControlFlow::Continue(()),
+            RigidTy::Array(t, c) => {
+                t.visit(visitor)?;
+                c.visit(visitor)
+            }
+            RigidTy::Slice(inner) => inner.visit(visitor),
+            RigidTy::RawPtr(ty, _) => ty.visit(visitor),
+            RigidTy::Ref(reg, ty, _) => {
+                reg.visit(visitor)?;
+                ty.visit(visitor)
+            }
+            RigidTy::FnDef(_, args) => args.visit(visitor),
+            RigidTy::FnPtr(sig) => sig.visit(visitor),
+            RigidTy::Closure(_, args) => args.visit(visitor),
+            RigidTy::Generator(_, args, _) => args.visit(visitor),
+            RigidTy::Dynamic(pred, r, _) => {
+                pred.visit(visitor)?;
+                r.visit(visitor)
+            }
+            RigidTy::Tuple(fields) => fields.visit(visitor),
+            RigidTy::Adt(_, args) => args.visit(visitor),
+        }
+    }
+}
+
+impl<T: Visitable> Visitable for Vec<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        for arg in self {
+            arg.visit(visitor)?;
+        }
+        ControlFlow::Continue(())
+    }
+}
+
+impl<T: Visitable> Visitable for Binder<T> {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.value.visit(visitor)
+    }
+}
+
+impl Visitable for ExistentialPredicate {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            ExistentialPredicate::Trait(tr) => tr.generic_args.visit(visitor),
+            ExistentialPredicate::Projection(p) => {
+                p.term.visit(visitor)?;
+                p.generic_args.visit(visitor)
+            }
+            ExistentialPredicate::AutoTrait(_) => ControlFlow::Continue(()),
+        }
+    }
+}
+
+impl Visitable for TermKind {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        match self {
+            TermKind::Type(t) => t.visit(visitor),
+            TermKind::Const(c) => c.visit(visitor),
+        }
+    }
+}
+
+impl Visitable for FnSig {
+    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        self.inputs_and_output.visit(visitor)
+    }
+}
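
The Visitor/Visitable pair added here is driven by ControlFlow, so a traversal can stop as soon as it finds what it is looking for. A minimal usage sketch, assuming the crate is consumed as stable_mir with public ty and visitor modules; the FindRawPtr type and contains_raw_ptr helper are illustrative, not part of this patch:

    use std::ops::ControlFlow;

    use stable_mir::ty::{RigidTy, Ty, TyKind};
    use stable_mir::visitor::{Visitable, Visitor};

    // Hypothetical visitor that stops as soon as it encounters a raw pointer type.
    struct FindRawPtr;

    impl Visitor for FindRawPtr {
        type Break = ();

        fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break> {
            if let TyKind::RigidTy(RigidTy::RawPtr(..)) = ty.kind() {
                return ControlFlow::Break(());
            }
            // Keep walking into nested types (array elements, reference targets, ...).
            ty.super_visit(self)
        }
    }

    // Hypothetical helper built on the visitor: true if `ty` contains a raw pointer.
    fn contains_raw_ptr(ty: &Ty) -> bool {
        let mut finder = FindRawPtr;
        ty.visit(&mut finder).is_break()
    }

Because every Visitor hook defaults to super_visit, a tool only overrides the cases it cares about, and the rest of the type structure keeps being walked recursively.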